sanitizer_mutex.cpp (6.9 KB)
//===-- sanitizer_mutex.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
  12. #include "sanitizer_mutex.h"
  13. #include "sanitizer_common.h"
  14. namespace __sanitizer {
  15. void StaticSpinMutex::LockSlow() {
  16. for (int i = 0;; i++) {
  17. if (i < 100)
  18. proc_yield(1);
  19. else
  20. internal_sched_yield();
  21. if (atomic_load(&state_, memory_order_relaxed) == 0 &&
  22. atomic_exchange(&state_, 1, memory_order_acquire) == 0)
  23. return;
  24. }
  25. }
  26. void Semaphore::Wait() {
  27. u32 count = atomic_load(&state_, memory_order_relaxed);
  28. for (;;) {
  29. if (count == 0) {
  30. FutexWait(&state_, 0);
  31. count = atomic_load(&state_, memory_order_relaxed);
  32. continue;
  33. }
  34. if (atomic_compare_exchange_weak(&state_, &count, count - 1,
  35. memory_order_acquire))
  36. break;
  37. }
  38. }
// Adds `count` units to the semaphore and wakes up to `count` waiters.
// The release ordering on the add pairs with the acquire CAS in Wait(),
// and must happen before the futex wake so woken threads observe the
// incremented counter.
void Semaphore::Post(u32 count) {
  CHECK_NE(count, 0);
  atomic_fetch_add(&state_, count, memory_order_release);
  FutexWake(&state_, count);
}
#if SANITIZER_CHECK_DEADLOCKS
// An empty mutex meta table, it effectively disables deadlock detection.
// Each tool can override the table to define own mutex hierarchy and
// enable deadlock detection.
// The table defines a static mutex type hierarchy (what mutex types can be locked
// under what mutex types). This table is checked to be acyclic and then
// actual mutex lock/unlock operations are checked to adhere to this hierarchy.
// The checking happens on mutex types rather than on individual mutex instances
// because doing it on mutex instances will both significantly complicate
// the implementation, worsen performance and memory overhead and is mostly
// unnecessary (we almost never lock multiple mutexes of the same type recursively).
static constexpr int kMutexTypeMax = 20;
// Weak so that each tool can override with its own hierarchy table.
SANITIZER_WEAK_ATTRIBUTE MutexMeta mutex_meta[kMutexTypeMax] = {};
// Weak no-op default; tools override to symbolize the stored lock PC.
SANITIZER_WEAK_ATTRIBUTE void PrintMutexPC(uptr pc) {}
// Guards lazy one-time initialization in DebugMutexInit().
static StaticSpinMutex mutex_meta_mtx;
// Number of valid entries in mutex_meta; -1 means not yet initialized.
static int mutex_type_count = -1;
// Adjacency matrix of what mutexes can be locked under what mutexes.
static bool mutex_can_lock[kMutexTypeMax][kMutexTypeMax];
// Mutex types with MutexMulti mark.
static bool mutex_multi[kMutexTypeMax];
  64. void DebugMutexInit() {
  65. // Build adjacency matrix.
  66. bool leaf[kMutexTypeMax];
  67. internal_memset(&leaf, 0, sizeof(leaf));
  68. int cnt[kMutexTypeMax];
  69. internal_memset(&cnt, 0, sizeof(cnt));
  70. for (int t = 0; t < kMutexTypeMax; t++) {
  71. mutex_type_count = t;
  72. if (!mutex_meta[t].name)
  73. break;
  74. CHECK_EQ(t, mutex_meta[t].type);
  75. for (uptr j = 0; j < ARRAY_SIZE(mutex_meta[t].can_lock); j++) {
  76. MutexType z = mutex_meta[t].can_lock[j];
  77. if (z == MutexInvalid)
  78. break;
  79. if (z == MutexLeaf) {
  80. CHECK(!leaf[t]);
  81. leaf[t] = true;
  82. continue;
  83. }
  84. if (z == MutexMulti) {
  85. mutex_multi[t] = true;
  86. continue;
  87. }
  88. CHECK_LT(z, kMutexTypeMax);
  89. CHECK(!mutex_can_lock[t][z]);
  90. mutex_can_lock[t][z] = true;
  91. cnt[t]++;
  92. }
  93. }
  94. // Indicates the array is not properly terminated.
  95. CHECK_LT(mutex_type_count, kMutexTypeMax);
  96. // Add leaf mutexes.
  97. for (int t = 0; t < mutex_type_count; t++) {
  98. if (!leaf[t])
  99. continue;
  100. CHECK_EQ(cnt[t], 0);
  101. for (int z = 0; z < mutex_type_count; z++) {
  102. if (z == MutexInvalid || t == z || leaf[z])
  103. continue;
  104. CHECK(!mutex_can_lock[z][t]);
  105. mutex_can_lock[z][t] = true;
  106. }
  107. }
  108. // Build the transitive closure and check that the graphs is acyclic.
  109. u32 trans[kMutexTypeMax];
  110. static_assert(sizeof(trans[0]) * 8 >= kMutexTypeMax,
  111. "kMutexTypeMax does not fit into u32, switch to u64");
  112. internal_memset(&trans, 0, sizeof(trans));
  113. for (int i = 0; i < mutex_type_count; i++) {
  114. for (int j = 0; j < mutex_type_count; j++)
  115. if (mutex_can_lock[i][j])
  116. trans[i] |= 1 << j;
  117. }
  118. for (int k = 0; k < mutex_type_count; k++) {
  119. for (int i = 0; i < mutex_type_count; i++) {
  120. if (trans[i] & (1 << k))
  121. trans[i] |= trans[k];
  122. }
  123. }
  124. for (int i = 0; i < mutex_type_count; i++) {
  125. if (trans[i] & (1 << i)) {
  126. Printf("Mutex %s participates in a cycle\n", mutex_meta[i].name);
  127. Die();
  128. }
  129. }
  130. }
// Per-thread deadlock detector: tracks which mutex types the current thread
// holds and checks every new lock against the static mutex_can_lock
// hierarchy built by DebugMutexInit().
struct InternalDeadlockDetector {
  struct LockDesc {
    u64 seq;        // global acquisition order stamp; 0 = not held
    uptr pc;        // PC of the lock site, for diagnostics
    int recursion;  // recursive hold count for MutexMulti types
  };
  int initialized;  // 0 = not yet, 1 = enabled, -1 = disabled (see Initialize)
  u64 sequence;     // per-thread monotonic lock counter
  LockDesc locked[kMutexTypeMax];
  // Records a lock of `type` taken at `pc` and verifies it is permitted
  // under the most recently acquired mutex type. CHECK-fails on violation.
  void Lock(MutexType type, uptr pc) {
    if (!Initialize(type))
      return;
    CHECK_LT(type, mutex_type_count);
    // Find the last locked mutex type.
    // This is the type we will use for hierarchy checks.
    u64 max_seq = 0;
    MutexType max_idx = MutexInvalid;
    for (int i = 0; i != mutex_type_count; i++) {
      if (locked[i].seq == 0)
        continue;
      // Sequence stamps are unique per thread; equality would mean
      // corrupted state.
      CHECK_NE(locked[i].seq, max_seq);
      if (max_seq < locked[i].seq) {
        max_seq = locked[i].seq;
        max_idx = (MutexType)i;
      }
    }
    if (max_idx == type && mutex_multi[type]) {
      // Recursive lock of the same type.
      CHECK_EQ(locked[type].seq, max_seq);
      CHECK(locked[type].pc);
      locked[type].recursion++;
      return;
    }
    // Locking `type` under `max_idx` must be allowed by the hierarchy.
    if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
      Printf("%s: internal deadlock: can't lock %s under %s mutex\n", SanitizerToolName,
             mutex_meta[type].name, mutex_meta[max_idx].name);
      PrintMutexPC(locked[max_idx].pc);
      CHECK(0);
    }
    locked[type].seq = ++sequence;
    locked[type].pc = pc;
    locked[type].recursion = 1;
  }
  // Records an unlock of `type`; only clears state once the recursion
  // count drops to zero.
  void Unlock(MutexType type) {
    if (!Initialize(type))
      return;
    CHECK_LT(type, mutex_type_count);
    CHECK(locked[type].seq);
    CHECK_GT(locked[type].recursion, 0);
    if (--locked[type].recursion)
      return;
    locked[type].seq = 0;
    locked[type].pc = 0;
  }
  // CHECK-fails if the current thread still holds any checked mutex.
  void CheckNoLocks() {
    for (int i = 0; i < mutex_type_count; i++) CHECK_EQ(locked[i].recursion, 0);
  }
  // Lazily initializes the global tables; returns whether checking is
  // enabled for `type`. Sets initialized = -1 before taking the meta
  // mutex so that locks acquired during initialization itself (including
  // mutex_meta_mtx) do not recurse back into checking.
  bool Initialize(MutexType type) {
    if (type == MutexUnchecked || type == MutexInvalid)
      return false;
    CHECK_GT(type, MutexInvalid);
    if (initialized != 0)
      return initialized > 0;
    initialized = -1;
    SpinMutexLock lock(&mutex_meta_mtx);
    if (mutex_type_count < 0)
      DebugMutexInit();
    // An empty table (count == 0) keeps checking disabled for this thread.
    initialized = mutex_type_count ? 1 : -1;
    return initialized > 0;
  }
};
// One detector instance per thread; zero-initialized TLS means
// `initialized == 0` until the first checked lock on the thread.
static THREADLOCAL InternalDeadlockDetector deadlock_detector;
// Thin forwarders from the public CheckedMutex API to the TLS detector.
void CheckedMutex::LockImpl(uptr pc) { deadlock_detector.Lock(type_, pc); }
void CheckedMutex::UnlockImpl() { deadlock_detector.Unlock(type_); }
void CheckedMutex::CheckNoLocksImpl() { deadlock_detector.CheckNoLocks(); }
  206. #endif
  207. } // namespace __sanitizer