libstdc++
shared_mutex
Go to the documentation of this file.
00001 // <shared_mutex> -*- C++ -*-
00002 
00003 // Copyright (C) 2013-2018 Free Software Foundation, Inc.
00004 //
00005 // This file is part of the GNU ISO C++ Library.  This library is free
00006 // software; you can redistribute it and/or modify it under the
00007 // terms of the GNU General Public License as published by the
00008 // Free Software Foundation; either version 3, or (at your option)
00009 // any later version.
00010 
00011 // This library is distributed in the hope that it will be useful,
00012 // but WITHOUT ANY WARRANTY; without even the implied warranty of
00013 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00014 // GNU General Public License for more details.
00015 
00016 // Under Section 7 of GPL version 3, you are granted additional
00017 // permissions described in the GCC Runtime Library Exception, version
00018 // 3.1, as published by the Free Software Foundation.
00019 
00020 // You should have received a copy of the GNU General Public License and
00021 // a copy of the GCC Runtime Library Exception along with this program;
00022 // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
00023 // <http://www.gnu.org/licenses/>.
00024 
00025 /** @file include/shared_mutex
00026  *  This is a Standard C++ Library header.
00027  */
00028 
00029 #ifndef _GLIBCXX_SHARED_MUTEX
00030 #define _GLIBCXX_SHARED_MUTEX 1
00031 
00032 #pragma GCC system_header
00033 
00034 #if __cplusplus >= 201402L
00035 
00036 #include <bits/c++config.h>
00037 #include <condition_variable>
00038 #include <bits/functexcept.h>
00039 
00040 namespace std _GLIBCXX_VISIBILITY(default)
00041 {
00042 _GLIBCXX_BEGIN_NAMESPACE_VERSION
00043 
00044   /**
00045    * @ingroup mutexes
00046    * @{
00047    */
00048 
#ifdef _GLIBCXX_USE_C99_STDINT_TR1
#ifdef _GLIBCXX_HAS_GTHREADS

#if __cplusplus >= 201703L
// Feature-test macro for std::shared_mutex (C++17 only).
#define __cpp_lib_shared_mutex 201505
  class shared_mutex;
#endif

// Feature-test macro for std::shared_timed_mutex (C++14 and later).
#define __cpp_lib_shared_timed_mutex 201402
  class shared_timed_mutex;
00059 
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
  /// A shared mutex type implemented using pthread_rwlock_t.
  ///
  /// Thin wrapper over a POSIX read-write lock: exclusive ownership maps
  /// to the write lock and shared ownership to the read lock.  POSIX
  /// error codes are translated into the exceptions the standard requires.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    // A static initializer is available, so no init/destroy calls needed.
    pthread_rwlock_t    _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t    _M_rwlock;

  public:
    // Initialize the rwlock, mapping POSIX errors to standard exceptions.
    __shared_mutex_pthread()
    {
      int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
      if (__ret == ENOMEM)
        __throw_bad_alloc();
      else if (__ret == EAGAIN)
        __throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
        __throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      // Destroying a lock that is still held (EBUSY) is a precondition
      // violation by the program, so it is only checked via assert.
      int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    // Exclusive ownership

    // Acquire the write lock, blocking until it is available.
    void
    lock()
    {
      int __ret = pthread_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Try to acquire the write lock without blocking.
    bool
    try_lock()
    {
      int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      // pthread_rwlock_unlock releases whichever kind of lock (read or
      // write) the calling thread holds on this rwlock.
      int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
        __ret = pthread_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock_shared()
    {
      // A read lock is released by the same POSIX call as a write lock.
      unlock();
    }

    // Expose the underlying pthread_rwlock_t (used for native_handle()).
    void* native_handle() { return &_M_rwlock; }
  };
#endif
00169 
#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex               _M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable  _M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable  _M_gate2;
    // The write-entered flag and reader count.
    unsigned            _M_state;

    // The write-entered flag: the most significant bit of _M_state.
    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    // Maximum reader count: all state bits below the write-entered flag.
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    // Initially unlocked: no writer queued and zero readers.
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      // Destroying a mutex that is still locked is a precondition violation.
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      // try_to_lock so this never blocks, not even on the internal mutex.
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      // Succeed only when no writer has entered and no readers are held.
      if (__lk.owns_lock() && _M_state == 0)
        {
          _M_state = _S_write_entered;
          return true;
        }
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      // The predicate is false both while a writer is queued (the flag bit
      // makes _M_state exceed _S_max_readers) and while the reader count is
      // at its maximum.
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
        return false;
      if (_M_state < _S_max_readers)
        {
          ++_M_state;
          return true;
        }
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
        {
          // Wake the queued writer if there are no more readers.
          if (_M_readers() == 0)
            _M_gate2.notify_one();
          // No need to notify gate1 because we give priority to the queued
          // writer, and that writer will eventually notify gate1 after it
          // clears the write-entered flag.
        }
      else
        {
          // Wake any thread that was blocked on reader overflow.
          if (__prev == _S_max_readers)
            _M_gate1.notify_one();
        }
    }
  };
#endif
00315 
00316 #if __cplusplus > 201402L
00317   /// The standard shared mutex type.
00318   class shared_mutex
00319   {
00320   public:
00321     shared_mutex() = default;
00322     ~shared_mutex() = default;
00323 
00324     shared_mutex(const shared_mutex&) = delete;
00325     shared_mutex& operator=(const shared_mutex&) = delete;
00326 
00327     // Exclusive ownership
00328 
00329     void lock() { _M_impl.lock(); }
00330     bool try_lock() { return _M_impl.try_lock(); }
00331     void unlock() { _M_impl.unlock(); }
00332 
00333     // Shared ownership
00334 
00335     void lock_shared() { _M_impl.lock_shared(); }
00336     bool try_lock_shared() { return _M_impl.try_lock_shared(); }
00337     void unlock_shared() { _M_impl.unlock_shared(); }
00338 
00339 #if _GLIBCXX_USE_PTHREAD_RWLOCK_T
00340     typedef void* native_handle_type;
00341     native_handle_type native_handle() { return _M_impl.native_handle(); }
00342 
00343   private:
00344     __shared_mutex_pthread _M_impl;
00345 #else
00346   private:
00347     __shared_mutex_cv _M_impl;
00348 #endif
00349   };
00350 #endif // C++17
00351 
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  // Timed operations require pthread_rwlock_timed{rd,wr}lock, so the
  // pthread-based implementation is only used when both are available.
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif

  /// The standard shared timed mutex type.
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
    typedef chrono::system_clock        __clock_t;

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    // Try to gain exclusive ownership within a relative timeout, by
    // converting it to an absolute time point on __clock_t.
    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
        return try_lock_until(__clock_t::now() + __rel_time);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    // Try to gain shared ownership within a relative timeout.
    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
        return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
      {
        // Split the absolute time into the seconds/nanoseconds timespec
        // form expected by the POSIX timed-lock functions.
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        // DR 887 - Sync unknown clock to known clock.
        // Measure the remaining time on _Clock and re-express the deadline
        // on __clock_t, which the overload above understands.
        const typename _Clock::time_point __c_entry = _Clock::now();
        const __clock_t::time_point __s_entry = __clock_t::now();
        const auto __delta = __abs_time - __c_entry;
        const auto __s_atime = __s_entry + __delta;
        return try_lock_until(__s_atime);
      }

    // Shared ownership

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<__clock_t,
                            _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret;
        // Unlike for lock(), we are not allowed to throw an exception so if
        // the maximum number of read locks has been exceeded, or we would
        // deadlock, we just try to acquire the lock again (and will time out
        // eventually).
        // In cases where we would exceed the maximum number of read locks
        // throughout the whole time until the timeout, we will fail to
        // acquire the lock even if it would be logically free; however, this
        // is allowed by the standard, and we made a "strong effort"
        // (see C++14 30.4.1.4p26).
        // For cases where the implementation detects a deadlock we
        // intentionally block and timeout so that an early return isn't
        // mistaken for a spurious failure, which might help users realise
        // there is a deadlock.
        do
          __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
        while (__ret == EAGAIN || __ret == EDEADLK);
        if (__ret == ETIMEDOUT)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                                                     _Duration>& __abs_time)
      {
        // DR 887 - Sync unknown clock to known clock.
        const typename _Clock::time_point __c_entry = _Clock::now();
        const __clock_t::time_point __s_entry = __clock_t::now();
        const auto __delta = __abs_time - __c_entry;
        const auto __s_atime = __s_entry + __delta;
        return try_lock_shared_until(__s_atime);
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        // Phase 1: wait (with deadline) until no other writer has entered.
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return !_M_write_entered(); }))
          {
            return false;
          }
        _M_state |= _S_write_entered;
        // Phase 2: wait (with deadline) for the existing readers to drain.
        if (!_M_gate2.wait_until(__lk, __abs_time,
                                 [=]{ return _M_readers() == 0; }))
          {
            // Timed out: withdraw the write-entered flag set above.
            _M_state ^= _S_write_entered;
            // Wake all threads blocked while the write-entered flag was set.
            _M_gate1.notify_all();
            return false;
          }
        return true;
      }

    // Shared ownership

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                                                     _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        // Wait (with deadline) until no writer is queued and the reader
        // count is below its maximum, then take a reader lock.
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return _M_state < _S_max_readers; }))
          {
            return false;
          }
        ++_M_state;
        return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
00537 #endif // _GLIBCXX_HAS_GTHREADS
00538 
00539   /// shared_lock
00540   template<typename _Mutex>
00541     class shared_lock
00542     {
00543     public:
00544       typedef _Mutex mutex_type;
00545 
00546       // Shared locking
00547 
00548       shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
00549 
00550       explicit
00551       shared_lock(mutex_type& __m)
00552       : _M_pm(std::__addressof(__m)), _M_owns(true)
00553       { __m.lock_shared(); }
00554 
00555       shared_lock(mutex_type& __m, defer_lock_t) noexcept
00556       : _M_pm(std::__addressof(__m)), _M_owns(false) { }
00557 
00558       shared_lock(mutex_type& __m, try_to_lock_t)
00559       : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }
00560 
00561       shared_lock(mutex_type& __m, adopt_lock_t)
00562       : _M_pm(std::__addressof(__m)), _M_owns(true) { }
00563 
00564       template<typename _Clock, typename _Duration>
00565         shared_lock(mutex_type& __m,
00566                     const chrono::time_point<_Clock, _Duration>& __abs_time)
00567       : _M_pm(std::__addressof(__m)),
00568         _M_owns(__m.try_lock_shared_until(__abs_time)) { }
00569 
00570       template<typename _Rep, typename _Period>
00571         shared_lock(mutex_type& __m,
00572                     const chrono::duration<_Rep, _Period>& __rel_time)
00573       : _M_pm(std::__addressof(__m)),
00574         _M_owns(__m.try_lock_shared_for(__rel_time)) { }
00575 
00576       ~shared_lock()
00577       {
00578         if (_M_owns)
00579           _M_pm->unlock_shared();
00580       }
00581 
00582       shared_lock(shared_lock const&) = delete;
00583       shared_lock& operator=(shared_lock const&) = delete;
00584 
00585       shared_lock(shared_lock&& __sl) noexcept : shared_lock()
00586       { swap(__sl); }
00587 
00588       shared_lock&
00589       operator=(shared_lock&& __sl) noexcept
00590       {
00591         shared_lock(std::move(__sl)).swap(*this);
00592         return *this;
00593       }
00594 
00595       void
00596       lock()
00597       {
00598         _M_lockable();
00599         _M_pm->lock_shared();
00600         _M_owns = true;
00601       }
00602 
00603       bool
00604       try_lock()
00605       {
00606         _M_lockable();
00607         return _M_owns = _M_pm->try_lock_shared();
00608       }
00609 
00610       template<typename _Rep, typename _Period>
00611         bool
00612         try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
00613         {
00614           _M_lockable();
00615           return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
00616         }
00617 
00618       template<typename _Clock, typename _Duration>
00619         bool
00620         try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
00621         {
00622           _M_lockable();
00623           return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
00624         }
00625 
00626       void
00627       unlock()
00628       {
00629         if (!_M_owns)
00630           __throw_system_error(int(errc::resource_deadlock_would_occur));
00631         _M_pm->unlock_shared();
00632         _M_owns = false;
00633       }
00634 
00635       // Setters
00636 
00637       void
00638       swap(shared_lock& __u) noexcept
00639       {
00640         std::swap(_M_pm, __u._M_pm);
00641         std::swap(_M_owns, __u._M_owns);
00642       }
00643 
00644       mutex_type*
00645       release() noexcept
00646       {
00647         _M_owns = false;
00648         return std::exchange(_M_pm, nullptr);
00649       }
00650 
00651       // Getters
00652 
00653       bool owns_lock() const noexcept { return _M_owns; }
00654 
00655       explicit operator bool() const noexcept { return _M_owns; }
00656 
00657       mutex_type* mutex() const noexcept { return _M_pm; }
00658 
00659     private:
00660       void
00661       _M_lockable() const
00662       {
00663         if (_M_pm == nullptr)
00664           __throw_system_error(int(errc::operation_not_permitted));
00665         if (_M_owns)
00666           __throw_system_error(int(errc::resource_deadlock_would_occur));
00667       }
00668 
00669       mutex_type*       _M_pm;
00670       bool              _M_owns;
00671     };
00672 
  /// Swap specialization for shared_lock
  /// @relates shared_lock
  /// Exchanges the states of @p __x and @p __y via the member swap.
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
00678 
00679 #endif // _GLIBCXX_USE_C99_STDINT_TR1
00680 
  /// @} group mutexes
00682 _GLIBCXX_END_NAMESPACE_VERSION
00683 } // namespace
00684 
00685 #endif // C++14
00686 
00687 #endif // _GLIBCXX_SHARED_MUTEX