mutex 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969
  1. // <mutex> -*- C++ -*-
  2. // Copyright (C) 2003-2022 Free Software Foundation, Inc.
  3. //
  4. // This file is part of the GNU ISO C++ Library. This library is free
  5. // software; you can redistribute it and/or modify it under the
  6. // terms of the GNU General Public License as published by the
  7. // Free Software Foundation; either version 3, or (at your option)
  8. // any later version.
  9. // This library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU General Public License for more details.
  13. // Under Section 7 of GPL version 3, you are granted additional
  14. // permissions described in the GCC Runtime Library Exception, version
  15. // 3.1, as published by the Free Software Foundation.
  16. // You should have received a copy of the GNU General Public License and
  17. // a copy of the GCC Runtime Library Exception along with this program;
  18. // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
  19. // <http://www.gnu.org/licenses/>.
  20. /** @file include/mutex
  21. * This is a Standard C++ Library header.
  22. */
  23. #ifndef _GLIBCXX_MUTEX
  24. #define _GLIBCXX_MUTEX 1
  25. #pragma GCC system_header
  26. #if __cplusplus < 201103L
  27. # include <bits/c++0x_warning.h>
  28. #else
  29. #include <tuple>
  30. #include <exception>
  31. #include <type_traits>
  32. #include <system_error>
  33. #include <bits/chrono.h>
  34. #include <bits/std_mutex.h>
  35. #include <bits/unique_lock.h>
  36. #if ! _GTHREAD_USE_MUTEX_TIMEDLOCK
  37. # include <condition_variable>
  38. # include <thread>
  39. #endif
  40. #include <ext/atomicity.h> // __gnu_cxx::__is_single_threaded
  41. #if defined _GLIBCXX_HAS_GTHREADS && ! defined _GLIBCXX_HAVE_TLS
  42. # include <bits/std_function.h> // std::function
  43. #endif
  44. namespace std _GLIBCXX_VISIBILITY(default)
  45. {
  46. _GLIBCXX_BEGIN_NAMESPACE_VERSION
  47. /**
  48. * @addtogroup mutexes
  49. * @{
  50. */
  51. #ifdef _GLIBCXX_HAS_GTHREADS
  // Common base class for std::recursive_mutex and std::recursive_timed_mutex
  //
  // Owns the native recursive mutex object and manages its lifetime.
  // Non-copyable; derived classes access _M_mutex directly.
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t	__native_type;

    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    // Static initializer available: trivially constructed and destroyed.
    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    // No static initializer: initialize and destroy at run time.
    __native_type  _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };
  73. /// The standard recursive mutex type.
  74. class recursive_mutex : private __recursive_mutex_base
  75. {
  76. public:
  77. typedef __native_type* native_handle_type;
  78. recursive_mutex() = default;
  79. ~recursive_mutex() = default;
  80. recursive_mutex(const recursive_mutex&) = delete;
  81. recursive_mutex& operator=(const recursive_mutex&) = delete;
  82. void
  83. lock()
  84. {
  85. int __e = __gthread_recursive_mutex_lock(&_M_mutex);
  86. // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
  87. if (__e)
  88. __throw_system_error(__e);
  89. }
  90. bool
  91. try_lock() noexcept
  92. {
  93. // XXX EINVAL, EAGAIN, EBUSY
  94. return !__gthread_recursive_mutex_trylock(&_M_mutex);
  95. }
  96. void
  97. unlock()
  98. {
  99. // XXX EINVAL, EAGAIN, EBUSY
  100. __gthread_recursive_mutex_unlock(&_M_mutex);
  101. }
  102. native_handle_type
  103. native_handle() noexcept
  104. { return &_M_mutex; }
  105. };
#if _GTHREAD_USE_MUTEX_TIMEDLOCK
  // CRTP base class providing the try_lock_for/try_lock_until logic that
  // is shared by std::timed_mutex and std::recursive_timed_mutex.
  // _Derived must provide _M_timedlock (and _M_clocklock when
  // pthread_mutex_clocklock is available).
  template<typename _Derived>
    class __timed_mutex_impl
    {
    protected:
      // Convert a relative timeout to an absolute one and lock.
      template<typename _Rep, typename _Period>
	bool
	_M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
	  // Use steady_clock when the native layer can wait against
	  // CLOCK_MONOTONIC, otherwise fall back to system_clock.
#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
	  using __clock = chrono::steady_clock;
#else
	  using __clock = chrono::system_clock;
#endif

	  auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
	  // If the cast truncated, round up so we never wake before the
	  // full __rtime has elapsed.
	  if (ratio_greater<__clock::period, _Period>())
	    ++__rt;
	  return _M_try_lock_until(__clock::now() + __rt);
	}

      // Absolute timeout against system_clock: maps directly onto the
      // native timedlock (which measures against CLOCK_REALTIME).
      template<typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<chrono::system_clock,
			  _Duration>& __atime)
	{
	  // Split into whole seconds + nanoseconds for __gthread_time_t.
	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	  __gthread_time_t __ts = {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	  return static_cast<_Derived*>(this)->_M_timedlock(__ts);
	}

#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
      // Absolute timeout against steady_clock: wait on CLOCK_MONOTONIC,
      // immune to system clock adjustments.
      template<typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<chrono::steady_clock,
			  _Duration>& __atime)
	{
	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	  __gthread_time_t __ts = {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	  return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
							    __ts);
	}
#endif

      // Any other clock: convert to a relative wait and retry in a loop.
      template<typename _Clock, typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
#if __cplusplus > 201703L
	  static_assert(chrono::is_clock_v<_Clock>);
#endif
	  // The user-supplied clock may not tick at the same rate as
	  // steady_clock, so we must loop in order to guarantee that
	  // the timeout has expired before returning false.
	  auto __now = _Clock::now();
	  do {
	    auto __rtime = __atime - __now;
	    if (_M_try_lock_for(__rtime))
	      return true;
	    __now = _Clock::now();
	  } while (__atime > __now);
	  return false;
	}
    };
  /// The standard timed mutex type.
  class timed_mutex
  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
  {
  public:
    typedef __native_type*	native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Try to lock, giving up after the relative timeout @a __rtime.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    // Try to lock, giving up at the absolute time point @a __atime.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<timed_mutex>;

    // Called by __timed_mutex_impl: lock with an absolute
    // CLOCK_REALTIME timeout.
    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }

#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
    // Called by __timed_mutex_impl: lock with an absolute timeout
    // measured on the given clock.
    bool
    _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
    { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
#endif
  };
  /// recursive_timed_mutex
  class recursive_timed_mutex
  : private __recursive_mutex_base,
    public __timed_mutex_impl<recursive_timed_mutex>
  {
  public:
    typedef __native_type*	native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Try to lock, giving up after the relative timeout @a __rtime.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    // Try to lock, giving up at the absolute time point @a __atime.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle() noexcept
    { return &_M_mutex; }

  private:
    friend class __timed_mutex_impl<recursive_timed_mutex>;

    // Called by __timed_mutex_impl: lock with an absolute
    // CLOCK_REALTIME timeout.
    bool
    _M_timedlock(const __gthread_time_t& __ts)
    { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }

#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
    // Called by __timed_mutex_impl: lock with an absolute timeout
    // measured on the given clock.
    bool
    _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
    { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
#endif
  };
  279. #else // !_GTHREAD_USE_MUTEX_TIMEDLOCK
  280. /// timed_mutex
  281. class timed_mutex
  282. {
  283. mutex _M_mut;
  284. condition_variable _M_cv;
  285. bool _M_locked = false;
  286. public:
  287. timed_mutex() = default;
  288. ~timed_mutex() { __glibcxx_assert( !_M_locked ); }
  289. timed_mutex(const timed_mutex&) = delete;
  290. timed_mutex& operator=(const timed_mutex&) = delete;
  291. void
  292. lock()
  293. {
  294. unique_lock<mutex> __lk(_M_mut);
  295. _M_cv.wait(__lk, [&]{ return !_M_locked; });
  296. _M_locked = true;
  297. }
  298. bool
  299. try_lock()
  300. {
  301. lock_guard<mutex> __lk(_M_mut);
  302. if (_M_locked)
  303. return false;
  304. _M_locked = true;
  305. return true;
  306. }
  307. template<typename _Rep, typename _Period>
  308. bool
  309. try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
  310. {
  311. unique_lock<mutex> __lk(_M_mut);
  312. if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
  313. return false;
  314. _M_locked = true;
  315. return true;
  316. }
  317. template<typename _Clock, typename _Duration>
  318. bool
  319. try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
  320. {
  321. unique_lock<mutex> __lk(_M_mut);
  322. if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
  323. return false;
  324. _M_locked = true;
  325. return true;
  326. }
  327. void
  328. unlock()
  329. {
  330. lock_guard<mutex> __lk(_M_mut);
  331. __glibcxx_assert( _M_locked );
  332. _M_locked = false;
  333. _M_cv.notify_one();
  334. }
  335. };
  /// recursive_timed_mutex
  ///
  /// Fallback implementation built from mutex + condition_variable,
  /// tracking the owning thread's id and a recursion count.
  class recursive_timed_mutex
  {
    mutex		_M_mut;
    condition_variable	_M_cv;
    thread::id		_M_owner;
    unsigned		_M_count = 0;

    // Predicate type that tests whether the current thread can lock a mutex.
    struct _Can_lock
    {
      // Returns true if the mutex is unlocked or is locked by _M_caller.
      bool
      operator()() const noexcept
      { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }

      const recursive_timed_mutex* _M_mx;
      thread::id _M_caller;
    };

  public:
    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      unique_lock<mutex> __lk(_M_mut);
      // Wait until the mutex is free or already owned by this thread.
      _M_cv.wait(__lk, __can_lock);
      // Recursion count saturated: further ownership is not possible.
      if (_M_count == -1u)
	__throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
      _M_owner = __id;
      ++_M_count;
    }

    bool
    try_lock()
    {
      auto __id = this_thread::get_id();
      _Can_lock __can_lock{this, __id};
      lock_guard<mutex> __lk(_M_mut);
      if (!__can_lock())
	return false;
      // Recursion count saturated: fail rather than overflow.
      if (_M_count == -1u)
	return false;
      _M_owner = __id;
      ++_M_count;
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __id = this_thread::get_id();
	_Can_lock __can_lock{this, __id};
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
	  return false;
	if (_M_count == -1u)
	  return false;
	_M_owner = __id;
	++_M_count;
	return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	auto __id = this_thread::get_id();
	_Can_lock __can_lock{this, __id};
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_cv.wait_until(__lk, __atime, __can_lock))
	  return false;
	if (_M_count == -1u)
	  return false;
	_M_owner = __id;
	++_M_count;
	return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_owner == this_thread::get_id() );
      __glibcxx_assert( _M_count > 0 );
      if (--_M_count == 0)
	{
	  // Fully unlocked: clear the owner and wake one waiter.
	  _M_owner = {};
	  _M_cv.notify_one();
	}
    }
  };
  427. #endif
  428. #endif // _GLIBCXX_HAS_GTHREADS
  429. /// @cond undocumented
  430. namespace __detail
  431. {
  432. // Lock the last lockable, after all previous ones are locked.
  433. template<typename _Lockable>
  434. inline int
  435. __try_lock_impl(_Lockable& __l)
  436. {
  437. if (unique_lock<_Lockable> __lock{__l, try_to_lock})
  438. {
  439. __lock.release();
  440. return -1;
  441. }
  442. else
  443. return 0;
  444. }
    // Lock each lockable in turn.
    // Use iteration if all lockables are the same type, recursion otherwise.
    // Returns -1 if everything was locked, otherwise the 0-based index of
    // the first lockable that could not be locked (and nothing stays held).
    template<typename _L0, typename... _Lockables>
      inline int
      __try_lock_impl(_L0& __l0, _Lockables&... __lockables)
      {
#if __cplusplus >= 201703L
	if constexpr ((is_same_v<_L0, _Lockables> && ...))
	  {
	    constexpr int _Np = 1 + sizeof...(_Lockables);
	    unique_lock<_L0> __locks[_Np] = {
	      {__l0, defer_lock}, {__lockables, defer_lock}...
	    };
	    for (int __i = 0; __i < _Np; ++__i)
	      {
		if (!__locks[__i].try_lock())
		  {
		    // Failed on __locks[__i]: drop the ones already held
		    // and report the failing index.
		    const int __failed = __i;
		    while (__i--)
		      __locks[__i].unlock();
		    return __failed;
		  }
	      }
	    // All locked: transfer ownership to the caller.
	    for (auto& __l : __locks)
	      __l.release();
	    return -1;
	  }
	else
#endif
	if (unique_lock<_L0> __lock{__l0, try_to_lock})
	  {
	    // First lockable taken; recurse for the rest.
	    int __idx = __detail::__try_lock_impl(__lockables...);
	    if (__idx == -1)
	      {
		// Everything locked: keep ownership of __l0 too.
		__lock.release();
		return -1;
	      }
	    // A later lockable failed; +1 adjusts the index to include __l0.
	    return __idx + 1;
	  }
	else
	  return 0;
      }
  487. } // namespace __detail
  488. /// @endcond
  /** @brief Generic try_lock.
   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   */
  template<typename _L1, typename _L2, typename... _L3>
    inline int
    try_lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      // __try_lock_impl unlocks everything already taken when any
      // later try_lock() fails or throws.
      return __detail::__try_lock_impl(__l1, __l2, __l3...);
    }
  505. /// @cond undocumented
  506. namespace __detail
  507. {
    // This function can recurse up to N levels deep, for N = 1+sizeof...(L1).
    // On each recursion the lockables are rotated left one position,
    // e.g. depth 0: l0, l1, l2; depth 1: l1, l2, l0; depth 2: l2, l0, l1.
    // When a call to l_i.try_lock() fails it recurses/returns to depth=i
    // so that l_i is the first argument, and then blocks until l_i is locked.
    template<typename _L0, typename... _L1>
      void
      __lock_impl(int& __i, int __depth, _L0& __l0, _L1&... __l1)
      {
	while (__i >= __depth)
	  {
	    if (__i == __depth)
	      {
		int __failed = 1; // index that couldn't be locked
		{
		  // Block on __l0, then try the rest without blocking.
		  unique_lock<_L0> __first(__l0);
		  __failed += __detail::__try_lock_impl(__l1...);
		  if (!__failed)
		    {
		      __i = -1; // finished
		      __first.release();
		      return;
		    }
		}
		// All locks dropped again; yield so the thread holding
		// the contended lock gets a chance to release it.
#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
		__gthread_yield();
#endif
		// Next iteration blocks on the lockable that failed.
		constexpr auto __n = 1 + sizeof...(_L1);
		__i = (__depth + __failed) % __n;
	      }
	    else // rotate left until l_i is first.
	      __detail::__lock_impl(__i, __depth + 1, __l1..., __l0);
	  }
      }
  542. } // namespace __detail
  543. /// @endcond
  /** @brief Generic lock.
   *  @param __l1 Meets Lockable requirements (try_lock() may throw).
   *  @param __l2 Meets Lockable requirements (try_lock() may throw).
   *  @param __l3 Meets Lockable requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If this function exits via an exception any locks that
   *  were obtained will be released.
   */
  template<typename _L1, typename _L2, typename... _L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
#if __cplusplus >= 201703L
      if constexpr (is_same_v<_L1, _L2> && (is_same_v<_L1, _L3> && ...))
	{
	  // All lockables have the same type: iterate over an array of
	  // deferred unique_locks instead of recursing.
	  constexpr int _Np = 2 + sizeof...(_L3);
	  unique_lock<_L1> __locks[] = {
	    {__l1, defer_lock}, {__l2, defer_lock}, {__l3, defer_lock}...
	  };
	  int __first = 0;
	  do {
	    // Block on lock __first, then try to take the others in
	    // rotation order without blocking.
	    __locks[__first].lock();
	    for (int __j = 1; __j < _Np; ++__j)
	      {
		const int __idx = (__first + __j) % _Np;
		if (!__locks[__idx].try_lock())
		  {
		    // Contention: release everything taken this round and
		    // restart, blocking on the contended lock next time.
		    for (int __k = __j; __k != 0; --__k)
		      __locks[(__first + __k - 1) % _Np].unlock();
		    __first = __idx;
		    break;
		  }
	      }
	  } while (!__locks[__first].owns_lock());

	  // Success: transfer ownership of every lock to the caller.
	  for (auto& __l : __locks)
	    __l.release();
	}
      else
#endif
      {
	int __i = 0;
	__detail::__lock_impl(__i, 0, __l1, __l2, __l3...);
      }
    }
  591. #if __cplusplus >= 201703L
  592. #define __cpp_lib_scoped_lock 201703L
  593. /** @brief A scoped lock type for multiple lockable objects.
  594. *
  595. * A scoped_lock controls mutex ownership within a scope, releasing
  596. * ownership in the destructor.
  597. */
  598. template<typename... _MutexTypes>
  599. class scoped_lock
  600. {
  601. public:
  602. explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
  603. { std::lock(__m...); }
  604. explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
  605. : _M_devices(std::tie(__m...))
  606. { } // calling thread owns mutex
  607. ~scoped_lock()
  608. { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }
  609. scoped_lock(const scoped_lock&) = delete;
  610. scoped_lock& operator=(const scoped_lock&) = delete;
  611. private:
  612. tuple<_MutexTypes&...> _M_devices;
  613. };
  // Explicit specialization for zero mutexes: nothing to lock or unlock.
  template<>
    class scoped_lock<>
    {
    public:
      explicit scoped_lock() = default;
      explicit scoped_lock(adopt_lock_t) noexcept { }
      ~scoped_lock() = default;

      scoped_lock(const scoped_lock&) = delete;
      scoped_lock& operator=(const scoped_lock&) = delete;
    };
  624. template<typename _Mutex>
  625. class scoped_lock<_Mutex>
  626. {
  627. public:
  628. using mutex_type = _Mutex;
  629. explicit scoped_lock(mutex_type& __m) : _M_device(__m)
  630. { _M_device.lock(); }
  631. explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
  632. : _M_device(__m)
  633. { } // calling thread owns mutex
  634. ~scoped_lock()
  635. { _M_device.unlock(); }
  636. scoped_lock(const scoped_lock&) = delete;
  637. scoped_lock& operator=(const scoped_lock&) = delete;
  638. private:
  639. mutex_type& _M_device;
  640. };
  641. #endif // C++17
  642. #ifdef _GLIBCXX_HAS_GTHREADS
  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // For gthreads targets a pthread_once_t is used with pthread_once, but
    // for most targets this doesn't work correctly for exceptional executions.
    __gthread_once_t _M_once = __GTHREAD_ONCE_INIT;

    // RAII helper that publishes the callable where __once_proxy can run it.
    struct _Prepare_execution;

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
  660. /// @cond undocumented
# ifdef _GLIBCXX_HAVE_TLS
  // If TLS is available use thread-local state for the type-erased callable
  // that is being run by std::call_once in the current thread.
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  // RAII type to set up state for pthread_once call.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
	// Store address in thread-local pointer:
	__once_callable = std::__addressof(__c);
	// Trampoline function to invoke the closure via thread-local pointer:
	__once_call = [] { (*static_cast<_Callable*>(__once_callable))(); };
      }

    ~_Prepare_execution()
    {
      // PR libstdc++/82481
      // Reset the pointers so no stale closure can be invoked later.
      __once_callable = nullptr;
      __once_call = nullptr;
    }

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
# else
  // Without TLS use a global std::mutex and store the callable in a
  // global std::function.
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();

  // RAII type to set up state for pthread_once call.
  struct once_flag::_Prepare_execution
  {
    template<typename _Callable>
      explicit
      _Prepare_execution(_Callable& __c)
      {
	// Store the callable in the global std::function
	__once_functor = __c;
	// Hand our lock on the global mutex to __once_proxy.
	__set_once_functor_lock_ptr(&_M_functor_lock);
      }

    ~_Prepare_execution()
    {
      // If __once_proxy never ran, take the lock pointer back.
      if (_M_functor_lock)
	__set_once_functor_lock_ptr(nullptr);
    }

  private:
    // XXX This deadlocks if used recursively (PR 97949)
    unique_lock<mutex> _M_functor_lock{__get_once_mutex()};

    _Prepare_execution(const _Prepare_execution&) = delete;
    _Prepare_execution& operator=(const _Prepare_execution&) = delete;
  };
# endif
  718. /// @endcond
  // This function is passed to pthread_once by std::call_once.
  // It runs __once_call() or __once_functor().
  extern "C" void __once_proxy(void);

  /// Invoke a callable and synchronize with other calls using the same flag
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      // Closure type that runs the function
      auto __callable = [&] {
	std::__invoke(std::forward<_Callable>(__f),
		      std::forward<_Args>(__args)...);
      };

      // Publish __callable where __once_proxy can find it.
      once_flag::_Prepare_execution __exec(__callable);

      // XXX pthread_once does not reset the flag if an exception is thrown.
      if (int __e = __gthread_once(&__once._M_once, &__once_proxy))
	__throw_system_error(__e);
    }
  737. #else // _GLIBCXX_HAS_GTHREADS
  /// Flag type used by std::call_once
  struct once_flag
  {
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

  private:
    // There are two different std::once_flag interfaces, abstracting four
    // different implementations.
    // The single-threaded interface uses the _M_activate() and _M_finish(bool)
    // functions, which start and finish an active execution respectively.
    // See [thread.once.callonce] in C++11 for the definition of
    // active/passive/returning/exceptional executions.
    enum _Bits : int { _Init = 0, _Active = 1, _Done = 2 };

    // Current state of the flag; one of the _Bits values above.
    int _M_once = _Bits::_Init;

    // Check to see if all executions will be passive now.
    bool
    _M_passive() const noexcept;

    // Attempts to begin an active execution.
    bool _M_activate();

    // Must be called to complete an active execution.
    // The argument is true if the active execution was a returning execution,
    // false if it was an exceptional execution.
    void _M_finish(bool __returning) noexcept;

    // RAII helper to call _M_finish.
    struct _Active_execution
    {
      explicit _Active_execution(once_flag& __flag) : _M_flag(__flag) { }

      ~_Active_execution() { _M_flag._M_finish(_M_returning); }

      _Active_execution(const _Active_execution&) = delete;
      _Active_execution& operator=(const _Active_execution&) = delete;

      once_flag& _M_flag;
      bool _M_returning = false; // set to true if the callable returns
    };

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
  // Inline definitions of std::once_flag members for single-threaded targets.

  inline bool
  once_flag::_M_passive() const noexcept
  { return _M_once == _Bits::_Done; }

  inline bool
  once_flag::_M_activate()
  {
    if (_M_once == _Bits::_Init) [[__likely__]]
      {
	_M_once = _Bits::_Active;
	return true;
      }
    else if (_M_passive()) // Caller should have checked this already.
      return false;
    else
      // Already _Active: on a single-threaded target a recursive
      // call_once on the same flag can never make progress.
      __throw_system_error(EDEADLK);
  }

  inline void
  once_flag::_M_finish(bool __returning) noexcept
  { _M_once = __returning ? _Bits::_Done : _Bits::_Init; }
  /// Invoke a callable and synchronize with other calls using the same flag
  template<typename _Callable, typename... _Args>
    inline void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
      if (__once._M_passive())
	return;
      else if (__once._M_activate())
	{
	  // _M_finish runs from this guard's destructor; _M_returning
	  // stays false if __invoke exits via an exception.
	  once_flag::_Active_execution __exec(__once);

	  // _GLIBCXX_RESOLVE_LIB_DEFECTS
	  // 2442. call_once() shouldn't DECAY_COPY()
	  std::__invoke(std::forward<_Callable>(__f),
			std::forward<_Args>(__args)...);

	  // __f(__args...) did not throw
	  __exec._M_returning = true;
	}
    }
  816. #endif // _GLIBCXX_HAS_GTHREADS
  817. /// @} group mutexes
  818. _GLIBCXX_END_NAMESPACE_VERSION
  819. } // namespace
  820. #endif // C++11
  821. #endif // _GLIBCXX_MUTEX