//===-- sanitizer_allocator.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//
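// A minimal usage sketch of the internal allocation API defined below (the
// sizes here are illustrative only):
//
//   void *p = InternalAlloc(128);  // dies with a fatal report on OOM
//   p = InternalRealloc(p, 256);
//   InternalFree(p);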
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// Default allocator names.
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";

// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
#  if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
#  endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
#  include <stdlib.h>
#  define __libc_malloc malloc
#  if !SANITIZER_GO
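// posix_memalign reports failure via its return value rather than by
// returning null, so wrap it to give __libc_memalign the usual malloc-style
// "null on failure" contract.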
static void *__libc_memalign(uptr alignment, uptr size) {
  void *p;
  uptr error = posix_memalign(&p, alignment, size);
  if (error)
    return nullptr;
  return p;
}
#  endif
#  define __libc_realloc realloc
#  define __libc_free free
# endif
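// With SANITIZER_USE_MALLOC the "internal allocator" is just libc: the Raw*
// helpers below forward to malloc/realloc/free and ignore the cache argument.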
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  (void)cache;
#if !SANITIZER_GO
  if (alignment == 0)
    return __libc_malloc(size);
  else
    return __libc_memalign(alignment, size);
#else
  // Windows does not provide __libc_memalign/posix_memalign. It provides
  // __aligned_malloc, but the allocated blocks can't be passed to free,
  // they need to be passed to __aligned_free. The InternalAlloc interface
  // does not account for such a requirement. Alignment does not seem to be
  // used anywhere in the runtime, so just call __libc_malloc for now.
  DCHECK_EQ(alignment, 0);
  return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  (void)cache;
  return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  (void)cache;
  __libc_free(ptr);
}
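// In this configuration no InternalAllocator instance exists; allocation is
// served entirely by the libc-backed helpers above.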
InternalAllocator *internal_allocator() {
  return nullptr;
}

#else  // defined(SANITIZER_USE_MALLOC)
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;
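// The allocator is constructed lazily, in preallocated static storage, via
// double-checked locking: the fast path is a single acquire load, and the
// spin mutex serializes first-time initialization.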
InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}
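// Callers may supply a per-thread cache; when they don't, fall back to a
// shared global cache guarded by a spin mutex. The realloc and free paths
// below follow the same pattern.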
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  uptr alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}
#endif  // defined(SANITIZER_USE_MALLOC)
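// Internal allocation failures are fatal: record the OOM condition for
// IsAllocatorOutOfMemory() and die with a report instead of returning null.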
static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
  SetAllocatorOutOfMemory();
  Report("FATAL: %s: internal allocator is out of memory trying to allocate "
         "0x%zx bytes\n",
         SanitizerToolName, requested_size);
  Die();
}

void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  void *p = RawInternalAlloc(size, cache, alignment);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return p;
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  void *p = RawInternalRealloc(addr, size, cache);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return p;
}
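// The array-style entry points reject count/size pairs whose product would
// overflow: CheckForCallocOverflow returns true when count * size cannot be
// represented in a size_t.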
void *InternalReallocArray(void *addr, uptr count, uptr size,
                           InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report("FATAL: %s: reallocarray parameters overflow: count * size "
           "(%zd * %zd) cannot be represented in type size_t\n",
           SanitizerToolName, count, size);
    Die();
  }
  return InternalRealloc(addr, count * size, cache);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n",
           SanitizerToolName, count, size);
    Die();
  }
  void *p = InternalAlloc(count * size, cache);
  if (LIKELY(p))
    internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  RawInternalFree(addr, cache);
}

// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;
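// A simple bump allocator: allocations are carved out of a page-aligned
// mmap'd region and are never freed. When the current region cannot satisfy
// a request, a fresh region is mapped and the old region's tail is abandoned.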
void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, low_level_alloc_min_alignment);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = RoundUpTo(size, GetPageSizeCached());
    allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateMinAlignment(uptr alignment) {
  CHECK(IsPowerOfTwo(alignment));
  low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}
// Allocator OOM and other error handling support.
static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

void SetAllocatorOutOfMemory() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}

void PrintHintAllocatorCannotReturnNull() {
  Report("HINT: if you don't care about these errors you may set "
         "allocator_may_return_null=1\n");
}

}  // namespace __sanitizer