//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}
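
// Initialize() below busy-waits on __tsan_resumed when the stop_on_start flag
// is set; a debugger (or the program itself) resumes the runtime by calling
// __tsan_resume().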

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
    SANITIZER_CACHE_LINE_SIZE);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
#include <dlfcn.h>
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
#if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
#endif
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
#if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
#endif
}
#endif
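
// These hooks have weak default implementations, so an embedding front-end can
// override them either by providing strong definitions of
// OnFinalize()/OnInitialize() (or by building with TSAN_EXTERNAL_HOOKS), or, in
// non-Go builds, by pointing the on_initialize/on_finalize function pointers at
// its own callbacks.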

static ThreadContextBase *CreateThreadContext(Tid tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Release the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace)) {
    ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
    uptr unused = hdr + sizeof(Trace) - hdr_end;
    if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
      Report("ThreadSanitizer: failed to mprotect [0x%zx-0x%zx) \n", hdr_end,
             unused);
      CHECK("unable to mprotect" && 0);
    }
  }
  return New<ThreadContext>(tid);
}

#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry(CreateThreadContext, kMaxTid, kThreadQuarantineSize,
                      kMaxTidReuse),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      racy_addresses(),
      fired_suppressions_mtx(MutexTypeFired),
      clock_alloc(LINKER_INITIALIZED, "clock allocator") {
  fired_suppressions.reserve(8);
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
                         unsigned reuse_count, uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
    : fast_state(tid, epoch)
      // Do not touch these, rely on zero initialization,
      // they may be accessed before the ctor.
      // , ignore_reads_and_writes()
      // , ignore_interceptors()
      ,
      clock(tid, reuse_count)
#if !SANITIZER_GO
      ,
      jmp_bufs()
#endif
      ,
      tid(tid),
      unique_id(unique_id),
      stk_addr(stk_addr),
      stk_size(stk_size),
      tls_addr(tls_addr),
      tls_size(tls_size)
#if !SANITIZER_GO
      ,
      last_sleep_clock(tid)
#endif
{
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kShadowStackSize;
#else
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  shadow_stack = (uptr *)Alloc(kInitStackSize * sizeof(uptr));
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
#endif
}

#if !SANITIZER_GO
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

void InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.append("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return;
    }
  }
  MemoryProfiler(0);
  MaybeSpawnBackgroundThread();
}
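
// Example: TSAN_OPTIONS=profile_memory=memprof makes the code above write
// periodic memory profiles to memprof.<pid>; the special names "stdout" and
// "stderr" select the corresponding standard streams instead.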

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();
  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
       atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
       i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}
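
// The polling loop above is driven by flags, e.g.
// TSAN_OPTIONS=flush_memory_ms=1000:memory_limit_mb=2048 enables both the
// periodic and the RSS-triggered shadow flushes; flush_symbolizer_ms controls
// how often the symbolizer cache is dropped.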

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0) return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  ctx->metamap.ResetRange(thr->proc(), addr, size);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                               "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping the contiguous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at 0x%zx-0x%zx(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedSuperNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (0x%zx/0x%zx)\n",
           addr, size);
    Die();
  }
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // There is high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}

bool is_initialized;

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  InitializeMemoryProfiler();
#endif

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1)
    DumpProcessMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  ctx->thread_registry.Lock();
  ctx->report_mtx.Lock();
  ScopedErrorReportLock::Lock();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  thr->ignore_interceptors++;
}

void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  ScopedErrorReportLock::Unlock();
  ctx->report_mtx.Unlock();
  ctx->thread_registry.Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc,
                    bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  ScopedErrorReportLock::Unlock();
  ctx->report_mtx.Unlock();
  ctx->thread_registry.Unlock();

  uptr nthread = 0;
  ctx->thread_registry.GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
             " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

StackID CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}
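
// In CurrentStackId() above, a non-zero pc is pushed onto the shadow stack
// only for the duration of the StackDepotPut() call, so the stored StackID
// includes the caller's site; pc == 0 snapshots the shadow stack as-is.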

namespace v3 {

NOINLINE
void TraceSwitchPart(ThreadState *thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("TraceSwitchPart part=%p pos=%p\n", part, pos);
  if (part) {
    // We can get here when we still have space in the current trace part.
    // The fast-path check in TraceAcquire has false positives in the middle of
    // the part. Check if we are indeed at the end of the current part or not,
    // and fill any gaps with NopEvent's.
    Event *end = &part->events[TracePart::kSize];
    DCHECK_GE(pos, &part->events[0]);
    DCHECK_LE(pos, end);
    if (pos + 1 < end) {
      if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
          TracePart::kAlignment)
        *pos++ = NopEvent;
      *pos++ = NopEvent;
      DCHECK_LE(pos + 2, end);
      atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
      // Ensure we setup trace so that the next TraceAcquire
      // won't detect trace part end.
      Event *ev;
      CHECK(TraceAcquire(thr, &ev));
      return;
    }
    // We are indeed at the end.
    for (; pos < end; pos++) *pos = NopEvent;
  }
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    CHECK(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
    return;
  }
#endif
  part = new (MmapOrDie(sizeof(TracePart), "TracePart")) TracePart();
  part->trace = trace;
  thr->trace_prev_pc = 0;
  {
    Lock lock(&trace->mtx);
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set in the beginning of the trace.
  TraceTime(thr);
  for (uptr *pos = &thr->shadow_stack[0]; pos < thr->shadow_stack_pos; pos++)
    CHECK(TryTraceFunc(thr, *pos));
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                   d.addr, d.stack_id);
  }
}

}  // namespace v3

void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork)
    return;
#endif
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(Tid tid) { return (Trace *)GetThreadTraceHeader(tid); }

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}
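
// From the formulas above, the per-thread trace holds
// 2^(kTracePartSizeBits + history_size + 1) events, so each increment of the
// history_size flag doubles both the trace size and the number of trace parts.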

#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry, "ThreadRegistry", {}},
    {MutexTypeTrace, "Trace", {MutexLeaf}},
    {MutexTypeReport, "Report", {MutexTypeSyncVar}},
    {MutexTypeSyncVar, "SyncVar", {}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
}  // namespace __sanitizer
#endif