//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_TRACE_H
#define TSAN_TRACE_H

#include "tsan_defs.h"
#include "tsan_ilist.h"
#include "tsan_mutexset.h"
#include "tsan_stack_trace.h"

namespace __tsan {

const int kTracePartSizeBits = 13;
const int kTracePartSize = 1 << kTracePartSizeBits;
const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
const int kTraceSize = kTracePartSize * kTraceParts;
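// For reference, with the constants above: a part holds 1 << 13 = 8192
// events, there are 2*1024*1024 / 8192 = 256 parts, and the total trace
// capacity is 8192 * 256 = 2097152 (2M) events.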

// Must fit into 3 bits.
enum EventType {
  EventTypeMop,
  EventTypeFuncEnter,
  EventTypeFuncExit,
  EventTypeLock,
  EventTypeUnlock,
  EventTypeRLock,
  EventTypeRUnlock
};

// Represents a thread event (from most significant bit):
// u64 typ  : 3;   // EventType.
// u64 addr : 61;  // Associated pc.
typedef u64 Event;

const uptr kEventPCBits = 61;
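
// A minimal sketch of packing/unpacking an event in this legacy layout,
// based on the comment above (illustrative helpers with hypothetical names,
// not part of the runtime's interface):
inline Event MakeLegacyEvent(EventType typ, uptr addr) {
  return (static_cast<u64>(typ) << kEventPCBits) |
         (static_cast<u64>(addr) & ((1ull << kEventPCBits) - 1));
}
inline EventType LegacyEventType(Event ev) {
  return static_cast<EventType>(ev >> kEventPCBits);
}
inline uptr LegacyEventAddr(Event ev) {
  return static_cast<uptr>(ev & ((1ull << kEventPCBits) - 1));
}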

struct TraceHeader {
#if !SANITIZER_GO
  BufferedStackTrace stack0;  // Start stack for the trace.
#else
  VarSizeStackTrace stack0;
#endif
  u64 epoch0;  // Start epoch for the trace.
  MutexSet mset0;

  TraceHeader() : stack0(), epoch0() {}
};

struct Trace {
  Mutex mtx;
#if !SANITIZER_GO
  // Must be last to catch overflow as paging fault.
  // Go shadow stack is dynamically allocated.
  uptr shadow_stack[kShadowStackSize];
#endif
  // Must be the last field, because we unmap the unused part in
  // CreateThreadContext.
  TraceHeader headers[kTraceParts];

  Trace() : mtx(MutexTypeTrace) {}
};

namespace v3 {

enum class EventType : u64 {
  kAccessExt,
  kAccessRange,
  kLock,
  kRLock,
  kUnlock,
  kTime,
};

// "Base" type for all events for type dispatch.
struct Event {
  // We use variable-length type encoding to give more bits to some event
  // types that need them. If is_access is set, this is EventAccess.
  // Otherwise, if is_func is set, this is EventFunc.
  // Otherwise, type denotes the event type.
  u64 is_access : 1;
  u64 is_func : 1;
  EventType type : 3;
  u64 _ : 59;
};
static_assert(sizeof(Event) == 8, "bad Event size");
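
// A minimal classification sketch implied by the encoding above (illustrative
// helpers with hypothetical names, not the runtime's replay logic):
inline bool EvIsCompressedAccess(Event ev) { return ev.is_access; }
inline bool EvIsFuncEnterExit(Event ev) { return !ev.is_access && ev.is_func; }
inline bool EvIsTyped(Event ev) { return !ev.is_access && !ev.is_func; }
// For a typed event, ev.type then selects among kAccessExt, kAccessRange,
// kLock, kRLock, kUnlock and kTime.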

// Nop event used as padding; it does not affect state during replay.
static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};

// Compressed memory access; it can represent only accesses with a PC close
// enough to the previous access PC. Otherwise we fall back to EventAccessExt.
struct EventAccess {
  static constexpr uptr kPCBits = 15;

  u64 is_access : 1;  // = 1
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 pc_delta : kPCBits;  // signed delta from the previous memory access PC
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
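
// One way to fit the signed PC delta into kPCBits by biasing it (an
// illustrative sketch; not necessarily the runtime's exact scheme):
inline bool PCDeltaFits(uptr pc, uptr prev_pc, u64* delta) {
  s64 d = static_cast<s64>(pc) - static_cast<s64>(prev_pc) +
          (1ll << (EventAccess::kPCBits - 1));
  if (d < 0 || d >= (1ll << EventAccess::kPCBits))
    return false;  // too far away: emit EventAccessExt with the full PC
  *delta = static_cast<u64>(d);
  return true;
}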

// Function entry (pc != 0) or exit (pc == 0).
struct EventFunc {
  u64 is_access : 1;  // = 0
  u64 is_func : 1;    // = 1
  u64 pc : 62;
};
static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");

// Extended memory access with full PC.
struct EventAccessExt {
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessExt
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 _ : 11;
  u64 addr : kCompressedAddrBits;
  u64 pc;
};
static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");

// Access to a memory range.
struct EventAccessRange {
  static constexpr uptr kSizeLoBits = 13;

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessRange
  u64 is_read : 1;
  u64 is_free : 1;
  u64 size_lo : kSizeLoBits;
  u64 pc : kCompressedAddrBits;
  u64 addr : kCompressedAddrBits;
  u64 size_hi : 64 - kCompressedAddrBits;
};
static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
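
// A sketch of reassembling the full range size from the split fields
// (an illustrative helper, not part of the runtime's interface):
inline u64 EventAccessRangeSize(const EventAccessRange& ev) {
  return (ev.size_hi << EventAccessRange::kSizeLoBits) | ev.size_lo;
}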

// Mutex lock.
struct EventLock {
  static constexpr uptr kStackIDLoBits = 15;

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kLock or EventType::kRLock
  u64 pc : kCompressedAddrBits;
  u64 stack_lo : kStackIDLoBits;
  u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
  u64 _ : 3;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventLock) == 16, "bad EventLock size");

// Mutex unlock.
struct EventUnlock {
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kUnlock
  u64 _ : 15;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");

// Time change event.
struct EventTime {
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kTime
  u64 sid : sizeof(Sid) * kByteBits;
  u64 epoch : kEpochBits;
  u64 _ : 64 - 5 - sizeof(Sid) * kByteBits - kEpochBits;
};
static_assert(sizeof(EventTime) == 8, "bad EventTime size");

struct Trace;

struct TraceHeader {
  Trace* trace = nullptr;  // back-pointer to Trace containing this part
  INode trace_parts;       // in Trace::parts
};

struct TracePart : TraceHeader {
  static constexpr uptr kByteSize = 256 << 10;
  static constexpr uptr kSize =
      (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
  // TraceAcquire does a fast event pointer overflow check by comparing the
  // pointer into TracePart::events with the kAlignment mask. Since TraceParts
  // are allocated page-aligned, this check detects the end of the array
  // (it also has false positives in the middle that are filtered separately).
  // This also requires events to be the last field.
  static constexpr uptr kAlignment = 0xff0;
  Event events[kSize];

  TracePart() {}
};
static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
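
// A sketch of the fast end-of-part check described above (illustrative only;
// the runtime's exact expression may differ). Because parts are page-aligned
// and exactly kByteSize long, masking the address of the next event slot with
// kAlignment yields zero when the slot lands in the first 16 bytes of a 4K
// page, which covers the true end of events and, as the false positives
// mentioned above, intermediate page boundaries:
inline bool MaybeEndOfPart(const Event* next_pos) {
  return (reinterpret_cast<uptr>(next_pos) & TracePart::kAlignment) == 0;
}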

struct Trace {
  Mutex mtx;
  IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
  // Final position in the last part for finished threads.
  Event* final_pos = nullptr;

  Trace() : mtx(MutexTypeTrace) {}
};

}  // namespace v3

}  // namespace __tsan

#endif  // TSAN_TRACE_H