//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
//                                                             kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing, the objects of this class should be
// cache-line aligned.
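//
// The Params class is expected to provide the members referenced below
// (kSpaceBeg, kSpaceSize, kMetadataSize, SizeClassMap, kRegionSizeLog,
// MapUnmapCallback, AddressSpaceView, kFlags). A minimal sketch of a typical
// instantiation; the concrete values and names here are illustrative, not
// prescribed:
//
//   struct AP32 {
//     static const uptr kSpaceBeg = 0;
//     static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
//     static const uptr kMetadataSize = 16;
//     typedef DefaultSizeClassMap SizeClassMap;
//     static const uptr kRegionSizeLog = 20;
//     using AddressSpaceView = LocalAddressSpaceView;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;
//   };
//   typedef SizeClassAllocator32<AP32> Primary;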

struct SizeClassAllocator32FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
    kUseSeparateSizeClassForBatch = 2,
  };
};

template <class Params>
class SizeClassAllocator32 {
 private:
  static const u64 kTwoLevelByteMapSize1 =
      (Params::kSpaceSize >> Params::kRegionSizeLog) >> 12;
  static const u64 kMinFirstMapSizeTwoLevelByteMap = 4;

 public:
  using AddressSpaceView = typename Params::AddressSpaceView;
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;
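  // possible_regions stores one byte (the size class) per possible region.
  // When the number of possible regions is small the map is a flat array;
  // otherwise a two-level map is used so the full table does not have to be
  // allocated up front. E.g. with a 4 GB space and kRegionSizeLog == 20 there
  // are only 4096 possible regions, so the flat map is chosen.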
  using ByteMap = typename conditional<
      (kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap),
      FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog),
                  AddressSpaceView>,
      TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type;
  COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
                 (kSpaceSize & (kSpaceSize - 1)) == 0);

  static const bool kRandomShuffleChunks = Params::kFlags &
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;

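  // A TransferBatch holds up to kMaxNumCached chunk pointers and is the unit
  // in which chunks move between the per-class central free list and
  // SizeClassAllocator32LocalCache.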
  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(void *batch[], uptr count) {
      DCHECK_LE(count, kMaxNumCached);
      count_ = count;
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      DCHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) const {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
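    // (sizeof(uptr) * 2 covers the `next` pointer and the count_ field.)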
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr size) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));

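  // When a separate size class is used for batches, chunks of
  // SizeClassMap::kBatchClassID hold TransferBatch objects themselves, so that
  // class's chunk size is exactly kBatchSize.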
  static uptr ClassIdToSize(uptr class_id) {
    return (class_id == SizeClassMap::kBatchClassID) ?
        kBatchSize : SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
    CHECK(!heap_start);
    possible_regions.Init();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in 64-bit allocator.
  }

  void ForceReleaseToOS() {
    // Currently implemented in 64-bit allocator only.
  }

  void *MapWithCallback(uptr size) {
    void *res = MmapOrDie(size, PrimaryAllocatorName);
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }
  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

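  // Per-chunk metadata is stored at the end of the region and grows downwards:
  // the metadata of chunk n lives at (region_end - (n + 1) * kMetadataSize),
  // mirroring the Region layout described above.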
  void *GetMetaData(const void *p) {
    CHECK(kMetadataSize);
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

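  // Pops one TransferBatch for class_id off the central free list, refilling
  // the list from a newly mapped region when it is empty. Returns nullptr if
  // that mapping fails.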
  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty()) {
      if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
        return nullptr;
      DCHECK(!sci->free_list.empty());
    }
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    DCHECK_LT(class_id, kNumClasses);
    CHECK_GT(b->Count(), 0);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) const {
    uptr mem = reinterpret_cast<uptr>(p);
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) const {
    uptr id = ComputeRegionId(reinterpret_cast<uptr>(p));
    return possible_regions.contains(id) ? possible_regions[id] : 0;
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }
  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) const {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions.contains(region) && possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {}

  static uptr AdditionalSize() { return 0; }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
    StaticSpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    u32 rand_state;
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);

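  // Index of the kRegionSize-aligned region containing mem. On targets with
  // sign-extended addresses the pointer is masked into [0, kSpaceSize) first.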
  uptr ComputeRegionId(uptr mem) const {
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    const uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) const { return mem & ~(kRegionSize - 1); }

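  // Maps a fresh kRegionSize-aligned region for class_id, records its size
  // class in possible_regions and returns its base address, or 0 if the
  // aligned mapping fails.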
  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, PrimaryAllocatorName));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions[ComputeRegionId(res)] = class_id;
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

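  // Packs the chunk addresses in pointers_array into TransferBatches (randomly
  // shuffled first when enabled), appending each full batch to the class free
  // list. A partially filled batch, if any, is handed back via current_batch.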
  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
                       TransferBatch **current_batch, uptr max_count,
                       uptr *pointers_array, uptr count) {
    // If using a separate class for batches, we do not need to shuffle it.
    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
        class_id != SizeClassMap::kBatchClassID))
      RandomShuffle(pointers_array, count, &sci->rand_state);
    TransferBatch *b = *current_batch;
    for (uptr i = 0; i < count; i++) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)pointers_array[i]);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    *current_batch = b;
    return true;
  }

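  // Maps one new region, carves it into chunks of the class size and feeds
  // their addresses to PopulateBatches in groups of at most kShuffleArraySize.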
  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    const uptr region = AllocateRegion(stat, class_id);
    if (UNLIKELY(!region))
      return false;
    if (kRandomShuffleChunks)
      if (UNLIKELY(sci->rand_state == 0))
        // The random state is initialized from ASLR (PIE) and time.
        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
    const uptr size = ClassIdToSize(class_id);
    const uptr n_chunks = kRegionSize / (size + kMetadataSize);
    const uptr max_count = TransferBatch::MaxCached(size);
    DCHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    constexpr uptr kShuffleArraySize = 48;
    uptr shuffle_array[kShuffleArraySize];
    uptr count = 0;
    for (uptr i = region; i < region + n_chunks * size; i += size) {
      shuffle_array[count++] = i;
      if (count == kShuffleArraySize) {
        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                      shuffle_array, count)))
          return false;
        count = 0;
      }
    }
    if (count) {
      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                    shuffle_array, count)))
        return false;
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};