//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef MemoryMapper<Allocator> MemoryMapperT;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }
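
  // Pops a chunk of class_id from the thread-local cache, refilling the cache
  // from the shared allocator when it is empty. Returns nullptr if the refill
  // fails.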
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    CompactPtrT chunk = c->chunks[--c->count];
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
  }
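
  // Pushes a chunk of class_id onto the thread-local cache; when the cache is
  // full, half of it is returned to the shared allocator first.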
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      DrainHalfMax(c, allocator, class_id);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }
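
  // Returns all cached chunks of every size class to the shared allocator.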
  void Drain(SizeClassAllocator *allocator) {
    MemoryMapperT memory_mapper(*allocator);
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;
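
  // Per size class LIFO cache of compact pointers. max_count is twice the
  // per-class caching hint, so Refill() and DrainHalfMax() can move half of
  // it at a time.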
  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;
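
  // Lazily initializes max_count and class_size for every size class on the
  // first use of this cache (max_count == 0 means the cache is uninitialized).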
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
      c->class_size = size;
    }
    DCHECK_NE(c->max_count, 0UL);
  }
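
  // Fetches max_count / 2 chunks of class_id from the shared allocator into
  // the local cache. Returns false if the allocator cannot provide them.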
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    const uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }
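
  // Returns half of the cached chunks of class_id to the shared allocator.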
  NOINLINE void DrainHalfMax(PerClass *c, SizeClassAllocator *allocator,
                             uptr class_id) {
    MemoryMapperT memory_mapper(*allocator);
    Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
  }
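
  // Returns the last `count` cached chunks of class_id to the shared
  // allocator.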
  void Drain(MemoryMapperT *memory_mapper, PerClass *c,
             SizeClassAllocator *allocator, uptr class_id, uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }
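
  // Pops a chunk of class_id from the thread-local cache, refilling the cache
  // with a TransferBatch from the shared allocator when it is empty. Returns
  // nullptr if the refill fails.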
  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return res;
  }
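
  // Pushes a chunk of class_id onto the thread-local cache; when the cache is
  // full, half of it is returned to the shared allocator first.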
  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id);
    c->batch[c->count++] = p;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }
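
  // Returns all cached chunks of every size class to the shared allocator.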
  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;
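
  // Per size class cache of chunk pointers. batch_class_id is the size class
  // used to allocate TransferBatch objects for this class; 0 means a batch
  // fits within one of the class's own chunks.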
  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;
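
  // Lazily computes max_count, class_size and batch_class_id for every size
  // class on the first use of this cache.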
  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      const uptr max_cached = TransferBatch::MaxCached(size);
      c->max_count = 2 * max_cached;
      c->class_size = size;
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (size <
            TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
            batch_class_id : 0;
      }
    }
    DCHECK_NE(c->max_count, 0UL);
  }
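
  // Grabs a TransferBatch of class_id from the shared allocator, moves its
  // chunks into the local array and destroys the batch.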
  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }
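
  // Moves up to max_count / 2 cached chunks of class_id into a TransferBatch
  // and returns them to the shared allocator.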
  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
                      uptr class_id) {
    const uptr count = Min(c->max_count / 2, c->count);
    const uptr first_idx_to_drain = c->count - count;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b)) {
      Report("FATAL: Internal error: %s's allocator failed to allocate a "
             "transfer batch.\n", SanitizerToolName);
      Die();
    }
    b->SetFromArray(&c->batch[first_idx_to_drain], count);
    c->count -= count;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};