//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef MemoryMapper<Allocator> MemoryMapperT;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    CompactPtrT chunk = c->chunks[--c->count];
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
  }
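
  // Returns the chunk to the thread-local cache. When the cache is full,
  // DrainHalfMax() first flushes half of max_count chunks back to the shared
  // allocator.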
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      DrainHalfMax(c, allocator, class_id);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }
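
  // Flushes all cached chunks of every size class back to the shared
  // allocator; also called from Destroy() on thread teardown.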
  void Drain(SizeClassAllocator *allocator) {
    MemoryMapperT memory_mapper(*allocator);
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;
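
  // Lazily initializes max_count and class_size for all size classes on the
  // first call; cheap no-op afterwards.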
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
      c->class_size = size;
    }
    DCHECK_NE(c->max_count, 0UL);
  }
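
  // Fetches half of max_count chunks for class_id from the shared allocator
  // into the local cache.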
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    const uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }
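
  // Flushes half of max_count chunks of class_id back to the shared allocator.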
  NOINLINE void DrainHalfMax(PerClass *c, SizeClassAllocator *allocator,
                             uptr class_id) {
    MemoryMapperT memory_mapper(*allocator);
    Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
  }
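
  // Returns the topmost `count` cached chunks of class_id to the shared
  // allocator.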
  void Drain(MemoryMapperT *memory_mapper, PerClass *c,
             SizeClassAllocator *allocator, uptr class_id, uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }
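
  // Pops one cached chunk of the given size class, refilling the cache from a
  // TransferBatch when it is empty; returns nullptr if the refill fails.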
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return res;
  }
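
  // Returns the chunk to the thread-local cache, flushing half of the cached
  // chunks into a TransferBatch when the cache is full.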
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id);
    c->batch[c->count++] = p;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }
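
  // Flushes all cached chunks of every size class back to the shared
  // allocator; also called from Destroy() on thread teardown.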
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;
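
  // Lazily initializes max_count, class_size and batch_class_id for all size
  // classes on the first call; cheap no-op afterwards.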
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      const uptr max_cached = TransferBatch::MaxCached(size);
      c->max_count = 2 * max_cached;
      c->class_size = size;
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (size <
            TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
                batch_class_id : 0;
      }
    }
    DCHECK_NE(c->max_count, 0UL);
  }
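
  // Grabs one TransferBatch for class_id from the shared allocator, copies its
  // chunks into the local cache and destroys the batch.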
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }
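
  // Packs up to max_count / 2 cached chunks of class_id into a TransferBatch
  // and hands it back to the shared allocator. Dies if no batch can be
  // allocated to carry the chunks.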
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
                      uptr class_id) {
    const uptr count = Min(c->max_count / 2, c->count);
    const uptr first_idx_to_drain = c->count - count;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b)) {
      Report("FATAL: Internal error: %s's allocator failed to allocate a "
             "transfer batch.\n", SanitizerToolName);
      Die();
    }
    b->SetFromArray(&c->batch[first_idx_to_drain], count);
    c->count -= count;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};
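
// Illustrative usage sketch only (not part of this header); the typedefs and
// the AP64, primary, gstats and class_id names below are assumptions, not the
// actual wiring used by any particular sanitizer:
//
//   typedef SizeClassAllocator64<AP64> Primary;
//   typedef SizeClassAllocator64LocalCache<Primary> AllocatorCache;
//
//   AllocatorCache cache;                   // one instance per thread
//   cache.Init(&gstats);                    // register per-thread stats
//   void *p = cache.Allocate(&primary, class_id);
//   cache.Deallocate(&primary, class_id, p);
//   cache.Destroy(&primary, &gstats);       // drains all cached chunks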