  1. // -*- C++ -*- header.
  2. // Copyright (C) 2015-2022 Free Software Foundation, Inc.
  3. //
  4. // This file is part of the GNU ISO C++ Library. This library is free
  5. // software; you can redistribute it and/or modify it under the
  6. // terms of the GNU General Public License as published by the
  7. // Free Software Foundation; either version 3, or (at your option)
  8. // any later version.
  9. // This library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU General Public License for more details.
  13. // Under Section 7 of GPL version 3, you are granted additional
  14. // permissions described in the GCC Runtime Library Exception, version
  15. // 3.1, as published by the Free Software Foundation.
  16. // You should have received a copy of the GNU General Public License and
  17. // a copy of the GCC Runtime Library Exception along with this program;
  18. // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
  19. // <http://www.gnu.org/licenses/>.
  20. /** @file bits/atomic_futex.h
  21. * This is an internal header file, included by other library headers.
  22. * Do not attempt to use it directly.
  23. */
  24. #ifndef _GLIBCXX_ATOMIC_FUTEX_H
  25. #define _GLIBCXX_ATOMIC_FUTEX_H 1
  26. #pragma GCC system_header
  27. #include <atomic>
  28. #if ! (defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1)
  29. #include <mutex>
  30. #include <condition_variable>
  31. #endif
  32. #include <bits/chrono.h>
  33. #ifndef _GLIBCXX_ALWAYS_INLINE
  34. #define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
  35. #endif
  36. namespace std _GLIBCXX_VISIBILITY(default)
  37. {
  38. _GLIBCXX_BEGIN_NAMESPACE_VERSION
  39. #ifdef _GLIBCXX_HAS_GTHREADS
  40. #if defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
  // Non-template landing pad for the futex syscalls.  The member
  // functions are declared here but defined elsewhere in the library
  // (not in this header), so that the syscall wrappers are emitted once
  // rather than per template instantiation.
  struct __atomic_futex_unsigned_base
  {
    // Wait on the futex word at __addr while it still contains __val.
    // __s and __ns give an absolute timeout measured against
    // CLOCK_REALTIME; the timeout is honored only when __has_timeout is
    // true.  Returns false iff a timeout occurred.
    bool
    _M_futex_wait_until(unsigned *__addr, unsigned __val, bool __has_timeout,
	chrono::seconds __s, chrono::nanoseconds __ns);

    // Same as _M_futex_wait_until, but __s and __ns are measured
    // against CLOCK_MONOTONIC.  Returns false iff a timeout occurred.
    bool
    _M_futex_wait_until_steady(unsigned *__addr, unsigned __val,
	bool __has_timeout, chrono::seconds __s, chrono::nanoseconds __ns);

    // Wake all threads waiting on the futex word at __addr.
    // This can be executed after the object has been destroyed.
    static void _M_futex_notify_all(unsigned* __addr);
  };
  // Futex-based implementation.  _M_data stores the user's value in the
  // bits outside _Waiter_bit; _Waiter_bit is set whenever a thread may
  // be blocked in a futex wait on _M_data.  _M_store_notify_all only
  // issues the (comparatively expensive) futex wake when that bit was
  // set by a waiter.
  template <unsigned _Waiter_bit = 0x80000000>
    class __atomic_futex_unsigned : __atomic_futex_unsigned_base
    {
      typedef chrono::steady_clock __clock_t;

      // This must be lock-free and at offset 0, so its address can be
      // handed directly to the futex syscalls as the futex word.
      atomic<unsigned> _M_data;

    public:
      explicit
      __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
      { }

      // Load the current value with the waiter flag masked out.
      _GLIBCXX_ALWAYS_INLINE unsigned
      _M_load(memory_order __mo)
      {
	return _M_data.load(__mo) & ~_Waiter_bit;
      }

    private:
      // If a timeout occurs, returns a current value after the timeout;
      // otherwise, returns the operand's value if equal is true or a different
      // value if equal is false.
      // The assumed value is the caller's assumption about the current value
      // when making the call.
      // __s and __ns are measured against CLOCK_REALTIME.
      unsigned
      _M_load_and_test_until(unsigned __assumed, unsigned __operand,
	  bool __equal, memory_order __mo, bool __has_timeout,
	  chrono::seconds __s, chrono::nanoseconds __ns)
      {
	for (;;)
	  {
	    // Don't bother checking the value again because we expect the caller
	    // to have done it recently.
	    // memory_order_relaxed is sufficient because we can rely on just the
	    // modification order (store_notify uses an atomic RMW operation too),
	    // and the futex syscalls synchronize between themselves.
	    _M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
	    // The expected futex value carries the waiter bit we just set;
	    // the wait returns immediately if the word no longer matches.
	    bool __ret = _M_futex_wait_until((unsigned*)(void*)&_M_data,
					     __assumed | _Waiter_bit,
					     __has_timeout, __s, __ns);
	    // Fetch the current value after waiting (clears _Waiter_bit).
	    __assumed = _M_load(__mo);
	    // Stop on timeout, or once the (in)equality the caller asked
	    // for holds; otherwise this was a spurious wakeup — wait again.
	    if (!__ret || ((__operand == __assumed) == __equal))
	      return __assumed;
	    // TODO adapt wait time
	  }
      }

      // If a timeout occurs, returns a current value after the timeout;
      // otherwise, returns the operand's value if equal is true or a different
      // value if equal is false.
      // The assumed value is the caller's assumption about the current value
      // when making the call.
      // __s and __ns are measured against CLOCK_MONOTONIC.
      unsigned
      _M_load_and_test_until_steady(unsigned __assumed, unsigned __operand,
	  bool __equal, memory_order __mo, bool __has_timeout,
	  chrono::seconds __s, chrono::nanoseconds __ns)
      {
	for (;;)
	  {
	    // Don't bother checking the value again because we expect the caller
	    // to have done it recently.
	    // memory_order_relaxed is sufficient because we can rely on just the
	    // modification order (store_notify uses an atomic RMW operation too),
	    // and the futex syscalls synchronize between themselves.
	    _M_data.fetch_or(_Waiter_bit, memory_order_relaxed);
	    bool __ret = _M_futex_wait_until_steady((unsigned*)(void*)&_M_data,
						    __assumed | _Waiter_bit,
						    __has_timeout, __s, __ns);
	    // Fetch the current value after waiting (clears _Waiter_bit).
	    __assumed = _M_load(__mo);
	    if (!__ret || ((__operand == __assumed) == __equal))
	      return __assumed;
	    // TODO adapt wait time
	  }
      }

      // Returns the operand's value if equal is true or a different value if
      // equal is false.
      // The assumed value is the caller's assumption about the current value
      // when making the call.
      // (Untimed wait: delegates with __has_timeout == false.)
      unsigned
      _M_load_and_test(unsigned __assumed, unsigned __operand,
	  bool __equal, memory_order __mo)
      {
	return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
				      false, {}, {});
      }

      // If a timeout occurs, returns a current value after the timeout;
      // otherwise, returns the operand's value if equal is true or a different
      // value if equal is false.
      // The assumed value is the caller's assumption about the current value
      // when making the call.
      // system_clock overload: split the absolute deadline into the
      // seconds/nanoseconds pair the CLOCK_REALTIME wait expects.
      template<typename _Dur>
	unsigned
	_M_load_and_test_until_impl(unsigned __assumed, unsigned __operand,
	    bool __equal, memory_order __mo,
	    const chrono::time_point<std::chrono::system_clock, _Dur>& __atime)
	{
	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
	  // XXX correct?
	  return _M_load_and_test_until(__assumed, __operand, __equal, __mo,
					true, __s.time_since_epoch(), __ns);
	}

      // steady_clock overload: as above, but routed to the
      // CLOCK_MONOTONIC wait.
      template<typename _Dur>
	unsigned
	_M_load_and_test_until_impl(unsigned __assumed, unsigned __operand,
	    bool __equal, memory_order __mo,
	    const chrono::time_point<std::chrono::steady_clock, _Dur>& __atime)
	{
	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
	  // XXX correct?
	  return _M_load_and_test_until_steady(__assumed, __operand, __equal, __mo,
					       true, __s.time_since_epoch(), __ns);
	}

    public:
      // Block until the stored value differs from __val, then return it
      // (waiter flag masked out).
      _GLIBCXX_ALWAYS_INLINE unsigned
      _M_load_when_not_equal(unsigned __val, memory_order __mo)
      {
	unsigned __i = _M_load(__mo);
	if ((__i & ~_Waiter_bit) != __val)
	  return (__i & ~_Waiter_bit);
	// TODO Spin-wait first.
	return _M_load_and_test(__i, __val, false, __mo);
      }

      // Block until the stored value equals __val.
      _GLIBCXX_ALWAYS_INLINE void
      _M_load_when_equal(unsigned __val, memory_order __mo)
      {
	unsigned __i = _M_load(__mo);
	if ((__i & ~_Waiter_bit) == __val)
	  return;
	// TODO Spin-wait first.
	_M_load_and_test(__i, __val, true, __mo);
      }

      // Returns false iff a timeout occurred.
      // Relative timeout: convert to an absolute steady_clock deadline
      // (rounded up, so we never wake early) and delegate.
      template<typename _Rep, typename _Period>
	_GLIBCXX_ALWAYS_INLINE bool
	_M_load_when_equal_for(unsigned __val, memory_order __mo,
	    const chrono::duration<_Rep, _Period>& __rtime)
	{
	  using __dur = typename __clock_t::duration;
	  return _M_load_when_equal_until(__val, __mo,
	      __clock_t::now() + chrono::__detail::ceil<__dur>(__rtime));
	}

      // Returns false iff a timeout occurred.
      // Generic-clock overload: _Clock is not one of the clocks the futex
      // wait understands, so translate the deadline into __clock_t terms
      // and retry: if the __clock_t-based wait times out but _Clock says
      // the caller's deadline has not yet passed (the two clocks may
      // advance differently), recompute and wait again.
      template<typename _Clock, typename _Duration>
	_GLIBCXX_ALWAYS_INLINE bool
	_M_load_when_equal_until(unsigned __val, memory_order __mo,
	    const chrono::time_point<_Clock, _Duration>& __atime)
	{
	  typename _Clock::time_point __c_entry = _Clock::now();
	  do {
	    const __clock_t::time_point __s_entry = __clock_t::now();
	    const auto __delta = __atime - __c_entry;
	    const auto __s_atime = __s_entry +
		chrono::__detail::ceil<__clock_t::duration>(__delta);
	    if (_M_load_when_equal_until(__val, __mo, __s_atime))
	      return true;
	    __c_entry = _Clock::now();
	  } while (__c_entry < __atime);
	  return false;
	}

      // Returns false iff a timeout occurred.
      // system_clock deadline: mapped onto the CLOCK_REALTIME wait.
      template<typename _Duration>
	_GLIBCXX_ALWAYS_INLINE bool
	_M_load_when_equal_until(unsigned __val, memory_order __mo,
	    const chrono::time_point<std::chrono::system_clock, _Duration>& __atime)
	{
	  unsigned __i = _M_load(__mo);
	  if ((__i & ~_Waiter_bit) == __val)
	    return true;
	  // TODO Spin-wait first. Ignore effect on timeout.
	  __i = _M_load_and_test_until_impl(__i, __val, true, __mo, __atime);
	  return (__i & ~_Waiter_bit) == __val;
	}

      // Returns false iff a timeout occurred.
      // steady_clock deadline: mapped onto the CLOCK_MONOTONIC wait.
      template<typename _Duration>
	_GLIBCXX_ALWAYS_INLINE bool
	_M_load_when_equal_until(unsigned __val, memory_order __mo,
	    const chrono::time_point<std::chrono::steady_clock, _Duration>& __atime)
	{
	  unsigned __i = _M_load(__mo);
	  if ((__i & ~_Waiter_bit) == __val)
	    return true;
	  // TODO Spin-wait first. Ignore effect on timeout.
	  __i = _M_load_and_test_until_impl(__i, __val, true, __mo, __atime);
	  return (__i & ~_Waiter_bit) == __val;
	}

      // Store __val (clearing the waiter flag) and, only if some thread
      // had registered as a waiter, wake all futex waiters.  The futex
      // address is taken before the exchange because waiters may destroy
      // the object as soon as they observe the new value.
      _GLIBCXX_ALWAYS_INLINE void
      _M_store_notify_all(unsigned __val, memory_order __mo)
      {
	unsigned* __futex = (unsigned *)(void *)&_M_data;
	if (_M_data.exchange(__val, __mo) & _Waiter_bit)
	  _M_futex_notify_all(__futex);
      }
    };
  251. #else // ! (_GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1)
  252. // If futexes are not available, use a mutex and a condvar to wait.
  253. // Because we access the data only within critical sections, all accesses
  254. // are sequentially consistent; thus, we satisfy any provided memory_order.
  255. template <unsigned _Waiter_bit = 0x80000000>
  256. class __atomic_futex_unsigned
  257. {
  258. typedef chrono::system_clock __clock_t;
  259. unsigned _M_data;
  260. mutex _M_mutex;
  261. condition_variable _M_condvar;
  262. public:
  263. explicit
  264. __atomic_futex_unsigned(unsigned __data) : _M_data(__data)
  265. { }
  266. _GLIBCXX_ALWAYS_INLINE unsigned
  267. _M_load(memory_order __mo)
  268. {
  269. unique_lock<mutex> __lock(_M_mutex);
  270. return _M_data;
  271. }
  272. _GLIBCXX_ALWAYS_INLINE unsigned
  273. _M_load_when_not_equal(unsigned __val, memory_order __mo)
  274. {
  275. unique_lock<mutex> __lock(_M_mutex);
  276. while (_M_data == __val)
  277. _M_condvar.wait(__lock);
  278. return _M_data;
  279. }
  280. _GLIBCXX_ALWAYS_INLINE void
  281. _M_load_when_equal(unsigned __val, memory_order __mo)
  282. {
  283. unique_lock<mutex> __lock(_M_mutex);
  284. while (_M_data != __val)
  285. _M_condvar.wait(__lock);
  286. }
  287. template<typename _Rep, typename _Period>
  288. _GLIBCXX_ALWAYS_INLINE bool
  289. _M_load_when_equal_for(unsigned __val, memory_order __mo,
  290. const chrono::duration<_Rep, _Period>& __rtime)
  291. {
  292. unique_lock<mutex> __lock(_M_mutex);
  293. return _M_condvar.wait_for(__lock, __rtime,
  294. [&] { return _M_data == __val;});
  295. }
  296. template<typename _Clock, typename _Duration>
  297. _GLIBCXX_ALWAYS_INLINE bool
  298. _M_load_when_equal_until(unsigned __val, memory_order __mo,
  299. const chrono::time_point<_Clock, _Duration>& __atime)
  300. {
  301. unique_lock<mutex> __lock(_M_mutex);
  302. return _M_condvar.wait_until(__lock, __atime,
  303. [&] { return _M_data == __val;});
  304. }
  305. _GLIBCXX_ALWAYS_INLINE void
  306. _M_store_notify_all(unsigned __val, memory_order __mo)
  307. {
  308. unique_lock<mutex> __lock(_M_mutex);
  309. _M_data = __val;
  310. _M_condvar.notify_all();
  311. }
  312. };
  313. #endif // _GLIBCXX_HAVE_LINUX_FUTEX && ATOMIC_INT_LOCK_FREE > 1
  314. #endif // _GLIBCXX_HAS_GTHREADS
  315. _GLIBCXX_END_NAMESPACE_VERSION
  316. } // namespace std
  317. #endif