//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {

class MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() ACQUIRE() {
    if (LIKELY(TryLock()))
      return;
    LockSlow();
  }

  bool TryLock() TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void LockSlow();
};

class MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};

// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
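
// A minimal sketch of the park/unpark pattern (illustrative only; Job, Worker
// and Waiter are hypothetical, not part of this header). The destruction-safety
// note above means the thread that returns last from Wait() may destroy the
// semaphore (and, by extension, an object that contains it):
//
//   struct Job {
//     Semaphore done;
//     // ... results ...
//   };
//
//   void Worker(Job *job) {
//     // ... produce the results ...
//     job->done.Post();  // unpark one waiter
//   }
//
//   void Waiter(Job *job) {
//     job->done.Wait();  // park until Worker posts
//     // ... consume the results ...
//     // Safe to destroy *job here (this was the last thread to return from Wait).
//   }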

typedef int MutexType;

enum {
  // Used as a sentinel and to catch unassigned types
  // (should not be used as a real Mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool's own mutexes must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in the MutexMeta::can_lock table.
  // A leaf mutex can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while under a leaf mutex.
  MutexLeaf = -1,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};
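
// A tool defines its own mutex types starting at MutexLastCommon, e.g.
// (names are illustrative, not part of this header):
//
//   enum {
//     MutexTypeTrace = MutexLastCommon,
//     MutexTypeReport,
//     MutexTypeCount,  // keep last
//   };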

// The Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
// Disable checked locks on Darwin. Although Darwin platforms support
// THREADLOCAL variables, they are not usable early on during process init when
// `__sanitizer::Mutex` is used.
#define SANITIZER_CHECK_DEADLOCKS \
  (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_MAC)

#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
  MutexType type;
  const char *name;
  // The table specifies which mutexes can be locked under which other mutexes.
  // If the entry for MutexTypeFoo contains MutexTypeBar,
  // then Bar mutex can be locked while under Foo mutex.
  // Can also contain the special MutexLeaf/MutexMulti marks.
  MutexType can_lock[10];
};
#endif
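
// A hypothetical can_lock table for the example types above (illustrative
// only; real tools define their own tables):
//
//   static constexpr MutexMeta mutex_meta[] = {
//       {MutexInvalid, "Invalid", {}},
//       {MutexTypeReport, "Report", {MutexTypeTrace}},  // Trace may be taken under Report
//       {MutexTypeTrace, "Trace", {MutexLeaf}},         // leaf: nothing may be taken under it
//   };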

class CheckedMutex {
 public:
  explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    LockImpl(GET_CALLER_PC());
#endif
  }

  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};
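
// A minimal sketch of how the check is meant to be used (illustrative; the
// function name is hypothetical):
//
//   void SomeRuntimeEntryPoint() {
//     // ... lock/unlock internal mutexes ...
//     // Before returning to user code, verify nothing is still held.
//     CheckedMutex::CheckNoLocks();  // a no-op unless SANITIZER_CHECK_DEADLOCKS
//   }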

// Reader-writer mutex.
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class MUTEX Mutex : CheckedMutex {
 public:
  explicit constexpr Mutex(MutexType type = MutexUnchecked)
      : CheckedMutex(type) {}

  void Lock() ACQUIRE() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment the waiting writers count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
      DCHECK_NE(state & kWriterSpinWait, 0);
    }
  }

  void Unlock() RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
                    (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      wake_readers =
          wake_writer || (state & kWriterSpinWait) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  void ReadLock() ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      bool locked = (state & kWriterLock) != 0;
      u64 new_state;
      if (LIKELY(!locked)) {
        new_state = (state + kReaderLockInc) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        new_state = (state + kWaitingReaderInc) & reset_mask;
      } else if ((state & kReaderSpinWait) == 0) {
        // Active spinning, but denote our presence so that the unlocking
        // thread does not wake up other threads.
        new_state = state | kReaderSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting readers, so now block.
        readers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kReaderSpinWait, but we are still in active spinning.
      }
      reset_mask = ~kReaderSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
    }
  }

  void ReadUnlock() RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & kWriterLock, 0);
      new_state = state - kReaderLockInc;
      wake = (new_state &
              (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not check that the calling thread is the one that
  // owns the mutex. That check, while more strictly correct, causes problems
  // in cases like StopTheWorld, where a parent thread owns the mutex but a
  // child checks that it is locked. Rather than maintaining complex state to
  // work around such situations, the check only verifies that the mutex is
  // write-locked by some thread.
  void CheckWriteLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  //  - number of readers holding the lock,
  //    if non-zero, the mutex is read-locked
  //  - number of waiting readers,
  //    if non-zero, the mutex is write-locked
  //  - number of waiting writers,
  //    if non-zero, the mutex is read- or write-locked
  // And 3 flags:
  //  - writer lock
  //    if set, the mutex is write-locked
  //  - a writer is awake and spin-waiting
  //    the flag is used to prevent the thundering herd problem
  //    (new writers are not woken if this flag is set)
  //  - a reader is awake and spin-waiting
  //
  // Both writers and readers use active spinning before blocking.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // After wake-up both writers and readers compete to lock the
  // mutex again. This is needed to allow repeated locks even in the presence
  // of other blocked threads.
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
  static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);

  static constexpr uptr kMaxSpinIters = 1500;
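
  // Putting the constants together, the 64-bit state_ word is laid out as
  // follows (a sketch derived from the constants above):
  //
  //   bits  0..19  readers holding the lock   (kReaderLockMask)
  //   bits 20..39  waiting readers            (kWaitingReaderMask)
  //   bits 40..59  waiting writers            (kWaitingWriterMask)
  //   bit  60      writer lock                (kWriterLock)
  //   bit  61      a writer is spin-waiting   (kWriterSpinWait)
  //   bit  62      a reader is spin-waiting   (kReaderSpinWait)
  //
  // For example, a state of kWriterLock + 2 * kWaitingWriterInc decodes as:
  // write-locked, with two writers waiting.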

  Mutex(LinkerInitialized) = delete;
  Mutex(const Mutex &) = delete;
  void operator=(const Mutex &) = delete;
};

// OS-dependent futex-style primitives: FutexWait blocks the calling thread
// if *p still equals cmp; FutexWake wakes up to count threads waiting on p.
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);

template <typename MutexType>
class SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedRWLock {
 public:
  ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
      ACQUIRE(mu)
      : mu_(mu), write_(write) {
    if (write_)
      mu_->Lock();
    else
      mu_->ReadLock();
  }

  ALWAYS_INLINE ~GenericScopedRWLock() RELEASE() {
    if (write_)
      mu_->Unlock();
    else
      mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;
  bool write_;

  GenericScopedRWLock(const GenericScopedRWLock &) = delete;
  void operator=(const GenericScopedRWLock &) = delete;
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
typedef GenericScopedRWLock<Mutex> RWLock;
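
// A minimal usage sketch for the RAII wrappers (illustrative; the mutex and
// functions are hypothetical):
//
//   Mutex m(MutexUnchecked);
//
//   void Writer() {
//     Lock l(&m);      // exclusive: calls m.Lock()/m.Unlock()
//     // ... mutate shared state ...
//   }
//
//   void Reader() {
//     ReadLock l(&m);  // shared: calls m.ReadLock()/m.ReadUnlock()
//     // ... read shared state ...
//   }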

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H