//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A fast memory allocator that does not support free() or realloc().
// All allocations are forever.
//===----------------------------------------------------------------------===//
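//
// A minimal usage sketch (illustrative only; the names below are
// hypothetical). Instances are typically static objects so that the fields
// start zero-initialized; tryAlloc treats region_pos == 0 as "no region yet":
//
//   static PersistentAllocator<uptr> ids;
//   uptr *p = ids.alloc(4);        // space for four uptrs; never freed
//   uptr bytes = ids.allocated();  // total bytes mapped so far
//   ids.TestOnlyUnmap();           // tests only: release every block at once
//
//===----------------------------------------------------------------------===//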
#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
#define SANITIZER_PERSISTENT_ALLOCATOR_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

template <typename T>
class PersistentAllocator {
 public:
  T *alloc(uptr count = 1);
  uptr allocated() const { return atomic_load_relaxed(&mapped_size); }
  void TestOnlyUnmap();

 private:
  T *tryAlloc(uptr count);
  T *refillAndAlloc(uptr count);
  mutable StaticSpinMutex mtx;  // Protects allocation of new blocks.
  atomic_uintptr_t region_pos;  // Region allocator for Nodes.
  atomic_uintptr_t region_end;
  atomic_uintptr_t mapped_size;

  struct BlockInfo {
    const BlockInfo *next;
    uptr ptr;
    uptr size;
  };
  const BlockInfo *curr;
};
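
// Memory layout note (derived from refillAndAlloc below): each superblock is
// one MmapOrDie'd region whose BlockInfo descriptor is stored in its last
// sizeof(BlockInfo) bytes, and the descriptors form a singly linked list
// headed by curr:
//
//   | user allocations ...                 | BlockInfo{next, ptr, size} |
//   ^ region_pos bumps forward             ^ region_end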

template <typename T>
inline T *PersistentAllocator<T>::tryAlloc(uptr count) {
  // Optimistic lock-free allocation: essentially try to bump the region ptr.
  for (;;) {
    uptr cmp = atomic_load(&region_pos, memory_order_acquire);
    uptr end = atomic_load(&region_end, memory_order_acquire);
    uptr size = count * sizeof(T);
    if (cmp == 0 || cmp + size > end)
      return nullptr;
    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
                                     memory_order_acquire))
      return reinterpret_cast<T *>(cmp);
  }
}
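
// Note on the cmp == 0 check above: refillAndAlloc stores 0 to region_pos
// before mapping a new superblock, so concurrent callers of tryAlloc fail
// fast and fall through to refillAndAlloc, where they serialize on mtx.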

template <typename T>
inline T *PersistentAllocator<T>::alloc(uptr count) {
  // First, try to allocate optimistically.
  T *s = tryAlloc(count);
  if (LIKELY(s))
    return s;
  return refillAndAlloc(count);
}

template <typename T>
inline T *PersistentAllocator<T>::refillAndAlloc(uptr count) {
  // If the fast path failed, lock, retry and allocate a new superblock.
  SpinMutexLock l(&mtx);
  for (;;) {
    T *s = tryAlloc(count);
    if (s)
      return s;
    atomic_store(&region_pos, 0, memory_order_relaxed);
    uptr size = count * sizeof(T) + sizeof(BlockInfo);
    uptr allocsz = RoundUpTo(Max<uptr>(size, 64u * 1024u), GetPageSizeCached());
    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
    BlockInfo *new_block = (BlockInfo *)(mem + allocsz) - 1;
    new_block->next = curr;
    new_block->ptr = mem;
    new_block->size = allocsz;
    curr = new_block;
    atomic_fetch_add(&mapped_size, allocsz, memory_order_relaxed);
    allocsz -= sizeof(BlockInfo);
    atomic_store(&region_end, mem + allocsz, memory_order_release);
    atomic_store(&region_pos, mem, memory_order_release);
  }
}
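
// Publication order: region_end is released before region_pos, so a thread
// whose acquire load in tryAlloc observes the new region_pos also observes
// a region_end at least as recent as the one published together with it.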

template <typename T>
void PersistentAllocator<T>::TestOnlyUnmap() {
  while (curr) {
    uptr mem = curr->ptr;
    uptr allocsz = curr->size;
    curr = curr->next;
    UnmapOrDie((void *)mem, allocsz);
  }
  internal_memset(this, 0, sizeof(*this));
}

}  // namespace __sanitizer

#endif  // SANITIZER_PERSISTENT_ALLOCATOR_H