//===-- tsan_shadow.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_SHADOW_H
#define TSAN_SHADOW_H

#include "tsan_defs.h"
#include "tsan_trace.h"

namespace __tsan {

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
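//
// A minimal usage sketch (illustrative only; the exact shift amounts depend
// on kTidBits and kClkBits from tsan_defs.h):
//   FastState s(/*tid=*/5, /*epoch=*/7);  // raw() == (5 << kTidShift) | 7
//   s.IncrementEpoch();                   // epoch() == 8, tid() still 5
//   s.SetIgnoreBit();                     // sets bit 63; tid() is unaffected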
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x) : x_(x) {}

  u64 raw() const { return x_; }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }
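  // (The sign test above works because kIgnoreBit is the most significant
  // bit of x_.)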
  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() { SetHistorySize(0); }

  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }
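  // A worked example for GetTracePos (illustrative; kTracePartSizeBits comes
  // from tsan_trace.h): with hs == 0 the trace holds 2 parts of
  // kTracePartSize events each, so the mask keeps the low
  // kTracePartSizeBits + 1 bits of the epoch; with hs == 2 it holds 8 parts
  // and the mask widens accordingly.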
 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
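//
// The 5 bits just above the epoch thus hold size_log and addr0; that is the
// "& 31" mask used below. An illustrative encoding (values only, the shifts
// depend on kClkBits):
//   Shadow s(FastState(/*tid=*/1, /*epoch=*/10));
//   s.SetAddr0AndSizeLog(/*addr0=*/4, /*kAccessSizeLog=*/1);
//   s.SetWrite(false);  // sets the is_read bit
//   // s now describes a 2-byte read of bytes [4, 6) of its 8-byte granule.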
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x) : FastState(x) {}

  explicit Shadow(const FastState &s) : FastState(s.x_) { ClearHistorySize(); }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const { return x_ & kAtomicBit; }

  bool IsZero() const { return x_ == 0; }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE bool Addr0AndSizeAreEqual(const Shadow s1,
                                                 const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }
  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
                                               unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0
      // if (s1.addr0() + s1.size() > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }
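  // For example (illustrative): s1 with addr0() == 0 and size() == 4
  // intersects s2 with addr0() == 2, because 0 + 4 > 2, but not s2 with
  // addr0() == 4. The unsigned wrap-around of diff is intentional:
  // (s64)diff < 0 exactly when s1.addr0() < s2.addr0(), since addr0 <= 7.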
  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
  // The idea behind the freed bit is as follows.
  // When memory is freed (or otherwise becomes inaccessible), we write shadow
  // values with the tid/epoch of the free and with the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it looks like a write from a thread we have never synchronized
  // with). This lets us detect accesses to freed memory without extra
  // overhead in memory access processing, while still being able to restore
  // the tid/epoch of the free.
  void MarkAsFreed() { x_ |= kFreedBit; }

  bool IsFreed() const { return x_ & kFreedBit; }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }
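  // For example (illustrative): on free() the runtime can stamp the block's
  // shadow with cells whose tid/epoch identify the freeing thread and whose
  // freed bit is set; a later access to the block then reports a race
  // against the free, recovering where and when the memory was freed.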
  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift) |
                   (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3) <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
                     (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3) >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
                     (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }
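  // The two comparisons above exploit the adjacency of the is_read and
  // is_atomic bits: read as a 2-bit value, 0 = plain write, 1 = plain read,
  // 2 = atomic write, 3 = atomic read, ordered from the "strongest" access
  // (most likely to race) to the "weakest". For example, a stored plain read
  // (1) IsRWNotWeaker than an incoming atomic write (2).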
 private:
  static const u64 kReadShift = 5 + kClkBits;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0())
      return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
const RawShadow kShadowRodata = (RawShadow)-1;  // .rodata shadow marker

}  // namespace __tsan

#endif