//===-- sanitizer_stoptheworld_linux_libcdep.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See sanitizer_stoptheworld.h for details.
// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_LINUX &&                                                   \
    (defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
     defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
     defined(__arm__) || SANITIZER_RISCV64)

#include "sanitizer_stoptheworld.h"

#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_atomic.h"

#include <errno.h>
#include <sched.h>       // for CLONE_* definitions
#include <stddef.h>
#include <sys/prctl.h>   // for PR_* definitions
#include <sys/ptrace.h>  // for PTRACE_* definitions
#include <sys/types.h>   // for pid_t
#include <sys/uio.h>     // for iovec
#include <elf.h>         // for NT_PRSTATUS
#if (defined(__aarch64__) || SANITIZER_RISCV64) && !SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
# include <asm/ptrace.h>
#endif
#include <sys/user.h>  // for user_regs_struct
#if SANITIZER_ANDROID && SANITIZER_MIPS
# include <asm/reg.h>  // for mips SP register in sys/user.h
#endif
#include <sys/wait.h>  // for signal-related stuff

#ifdef sa_handler
# undef sa_handler
#endif

#ifdef sa_sigaction
# undef sa_sigaction
#endif

#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"

// Sufficiently old kernel headers don't provide this value, but we can still
// call prctl with it. If the runtime kernel is new enough, the prctl call will
// have the desired effect; if the kernel is too old, the call will error and
// we can ignore said error.
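// (The constant 0x59616d61 is ASCII "Yama": PR_SET_PTRACER is provided by the
// Yama security module, which restricts which processes may ptrace us.)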
#ifndef PR_SET_PTRACER
#define PR_SET_PTRACER 0x59616d61
#endif

// This module works by spawning a Linux task which then attaches to every
// thread in the caller process with ptrace. This suspends the threads, and
// PTRACE_GETREGS can then be used to obtain their register state. The callback
// supplied to StopTheWorld() is run in the tracer task while the threads are
// suspended.
// The tracer task must be placed in a different thread group for ptrace to
// work, so it cannot be spawned as a pthread. Instead, we use the low-level
// clone() interface (we want to share the address space with the caller
// process, so we prefer clone() over fork()).
//
// We don't use any libc functions, relying instead on direct syscalls. There
// are two reasons for this:
// 1. calling a library function while threads are suspended could cause a
//    deadlock, if one of the threads happens to be holding a libc lock;
// 2. it's generally not safe to call libc functions from the tracer task,
//    because clone() does not set up thread-local storage for it. Any
//    thread-local variables used by libc will be shared between the tracer
//    task and the thread which spawned it.
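//
// For reference, a caller uses this module roughly like this (hypothetical
// callback; the real callers live in the individual sanitizer runtimes):
//
//   static void MyCallback(const SuspendedThreadsList &suspended,
//                          void *argument) {
//     for (uptr i = 0; i < suspended.ThreadCount(); i++)
//       Printf("suspended tid: %zu\n", (uptr)suspended.GetThreadID(i));
//   }
//   ...
//   StopTheWorld(MyCallback, nullptr);  // MyCallback runs in the tracer task.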
namespace __sanitizer {

class SuspendedThreadsListLinux final : public SuspendedThreadsList {
 public:
  SuspendedThreadsListLinux() { thread_ids_.reserve(1024); }

  tid_t GetThreadID(uptr index) const override;
  uptr ThreadCount() const override;
  bool ContainsTid(tid_t thread_id) const;
  void Append(tid_t tid);

  PtraceRegistersStatus GetRegistersAndSP(uptr index,
                                          InternalMmapVector<uptr> *buffer,
                                          uptr *sp) const override;

 private:
  InternalMmapVector<tid_t> thread_ids_;
};

// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
  StopTheWorldCallback callback;
  void *callback_argument;
  // The tracer thread waits on this mutex while the parent finishes its
  // preparations.
  Mutex mutex;
  // Tracer thread signals its completion by setting done.
  atomic_uintptr_t done;
  uptr parent_pid;
};

// This class handles thread suspending/unsuspending in the tracer thread.
class ThreadSuspender {
 public:
  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
      : arg(arg), pid_(pid) {
    CHECK_GE(pid, 0);
  }
  bool SuspendAllThreads();
  void ResumeAllThreads();
  void KillAllThreads();
  SuspendedThreadsListLinux &suspended_threads_list() {
    return suspended_threads_list_;
  }
  TracerThreadArgument *arg;

 private:
  SuspendedThreadsListLinux suspended_threads_list_;
  pid_t pid_;
  bool SuspendThread(tid_t thread_id);
};
bool ThreadSuspender::SuspendThread(tid_t tid) {
  // Are we already attached to this thread?
  // Currently this check takes linear time, however the number of threads is
  // usually small.
  if (suspended_threads_list_.ContainsTid(tid)) return false;
  int pterrno;
  if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),
                       &pterrno)) {
    // Either the thread is dead, or something prevented us from attaching.
    // Log this event and move on.
    VReport(1, "Could not attach to thread %zu (errno %d).\n", (uptr)tid,
            pterrno);
    return false;
  } else {
    VReport(2, "Attached to thread %zu.\n", (uptr)tid);
    // The thread is not guaranteed to stop before ptrace returns, so we must
    // wait on it. Note: if the thread receives a signal concurrently,
    // we can get notification about the signal before notification about stop.
    // In such a case we need to forward the signal to the thread, otherwise
    // the signal will be missed (as we do PTRACE_DETACH with arg=0) and
    // any logic relying on signals will break. After forwarding we need to
    // continue to wait for stopping, because the thread is not stopped yet.
    // We do ignore delivery of SIGSTOP, because we want to make stop-the-world
    // as invisible as possible.
    for (;;) {
      int status;
      uptr waitpid_status;
      HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));
      int wperrno;
      if (internal_iserror(waitpid_status, &wperrno)) {
        // Got an ECHILD error. I don't think this situation is possible, but
        // it doesn't hurt to report it.
        VReport(1, "Waiting on thread %zu failed, detaching (errno %d).\n",
                (uptr)tid, wperrno);
        internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr);
        return false;
      }
      if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
        internal_ptrace(PTRACE_CONT, tid, nullptr,
                        (void *)(uptr)WSTOPSIG(status));
        continue;
      }
      break;
    }
    suspended_threads_list_.Append(tid);
    return true;
  }
}
void ThreadSuspender::ResumeAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) {
    pid_t tid = suspended_threads_list_.GetThreadID(i);
    int pterrno;
    if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr,
                                          nullptr),
                          &pterrno)) {
      VReport(2, "Detached from thread %d.\n", tid);
    } else {
      // Either the thread is dead, or we are already detached.
      // The latter case is possible, for instance, if this function was called
      // from a signal handler.
      VReport(1, "Could not detach from thread %d (errno %d).\n", tid,
              pterrno);
    }
  }
}

void ThreadSuspender::KillAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++)
    internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
                    nullptr, nullptr);
}

bool ThreadSuspender::SuspendAllThreads() {
  ThreadLister thread_lister(pid_);
  bool retry = true;
  InternalMmapVector<tid_t> threads;
  threads.reserve(128);
  for (int i = 0; i < 30 && retry; ++i) {
    retry = false;
    switch (thread_lister.ListThreads(&threads)) {
      case ThreadLister::Error:
        ResumeAllThreads();
        return false;
      case ThreadLister::Incomplete:
        retry = true;
        break;
      case ThreadLister::Ok:
        break;
    }
    for (tid_t tid : threads) {
      if (SuspendThread(tid))
        retry = true;
    }
  }
  return suspended_threads_list_.ThreadCount();
}
// Pointer to the ThreadSuspender instance for use in signal handler.
static ThreadSuspender *thread_suspender_instance = nullptr;

// Synchronous signals that should not be blocked.
static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
                                    SIGXCPU, SIGXFSZ };

static void TracerThreadDieCallback() {
  // Generally a call to Die() in the tracer thread should be fatal to the
  // parent process as well, because they share the address space.
  // This really only works correctly if all the threads are suspended at this
  // point. So we correctly handle calls to Die() from within the callback, but
  // not those that happen before or after the callback. Hopefully there aren't
  // a lot of opportunities for that to happen...
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst && stoptheworld_tracer_pid == internal_getpid()) {
    inst->KillAllThreads();
    thread_suspender_instance = nullptr;
  }
}

// Signal handler to wake up suspended threads when the tracer thread dies.
static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
                                      void *uctx) {
  SignalContext ctx(siginfo, uctx);
  Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
         ctx.addr, ctx.pc, ctx.sp);
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst) {
    if (signum == SIGABRT)
      inst->KillAllThreads();
    else
      inst->ResumeAllThreads();
    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
    thread_suspender_instance = nullptr;
    atomic_store(&inst->arg->done, 1, memory_order_relaxed);
  }
  internal__exit((signum == SIGABRT) ? 1 : 2);
}

// Size of alternative stack for signal handlers in the tracer thread.
static const int kHandlerStackSize = 8192;
// This function will be run as a cloned task.
static int TracerThread(void *argument) {
  TracerThreadArgument *tracer_thread_argument =
      (TracerThreadArgument *)argument;

  internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
  // Check if parent is already dead.
  if (internal_getppid() != tracer_thread_argument->parent_pid)
    internal__exit(4);

  // Wait for the parent thread to finish preparations.
  tracer_thread_argument->mutex.Lock();
  tracer_thread_argument->mutex.Unlock();

  RAW_CHECK(AddDieCallback(TracerThreadDieCallback));

  ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
  // Global pointer for the signal handler.
  thread_suspender_instance = &thread_suspender;

  // Alternate stack for signal handling.
  InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);
  stack_t handler_stack;
  internal_memset(&handler_stack, 0, sizeof(handler_stack));
  handler_stack.ss_sp = handler_stack_memory.data();
  handler_stack.ss_size = kHandlerStackSize;
  internal_sigaltstack(&handler_stack, nullptr);

  // Install our handler for synchronous signals. Other signals should be
  // blocked by the mask we inherited from the parent thread.
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
    __sanitizer_sigaction act;
    internal_memset(&act, 0, sizeof(act));
    act.sigaction = TracerThreadSignalHandler;
    act.sa_flags = SA_ONSTACK | SA_SIGINFO;
    internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
  }

  int exit_code = 0;
  if (!thread_suspender.SuspendAllThreads()) {
    VReport(1, "Failed suspending threads.\n");
    exit_code = 3;
  } else {
    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
                                     tracer_thread_argument->callback_argument);
    thread_suspender.ResumeAllThreads();
    exit_code = 0;
  }
  RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
  thread_suspender_instance = nullptr;
  atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
  return exit_code;
}
class ScopedStackSpaceWithGuard {
 public:
  explicit ScopedStackSpaceWithGuard(uptr stack_size) {
    stack_size_ = stack_size;
    guard_size_ = GetPageSizeCached();
    // FIXME: Omitting MAP_STACK here works in current kernels but might break
    // in the future.
    guard_start_ =
        (uptr)MmapOrDie(stack_size_ + guard_size_, "ScopedStackWithGuard");
    CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
  }
  ~ScopedStackSpaceWithGuard() {
    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
  }
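  // The guard page sits at the low end of the mapping and the stack grows
  // down toward it, so the initial stack pointer we hand to clone() is the
  // high end of the mapping.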
  void *Bottom() const {
    return (void *)(guard_start_ + stack_size_ + guard_size_);
  }

 private:
  uptr stack_size_;
  uptr guard_size_;
  uptr guard_start_;
};
// We have a limitation on the stack frame size, so some stuff had to be moved
// into globals.
static __sanitizer_sigset_t blocked_sigset;
static __sanitizer_sigset_t old_sigset;

class StopTheWorldScope {
 public:
  StopTheWorldScope() {
    // Make this process dumpable. Processes that are not dumpable cannot be
    // attached to.
    process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
  }

  ~StopTheWorldScope() {
    // Restore the dumpable flag.
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
  }

 private:
  int process_was_dumpable_;
};
// When sanitizer output is being redirected to a file (e.g. by using
// log_path), the tracer should write to the parent's log instead of trying to
// open a new file. Alert the logging code to the fact that we have a tracer.
struct ScopedSetTracerPID {
  explicit ScopedSetTracerPID(uptr tracer_pid) {
    stoptheworld_tracer_pid = tracer_pid;
    stoptheworld_tracer_ppid = internal_getpid();
  }
  ~ScopedSetTracerPID() {
    stoptheworld_tracer_pid = 0;
    stoptheworld_tracer_ppid = 0;
  }
};
void StopTheWorld(StopTheWorldCallback callback, void *argument) {
  StopTheWorldScope in_stoptheworld;
  // Prepare the arguments for TracerThread.
  struct TracerThreadArgument tracer_thread_argument;
  tracer_thread_argument.callback = callback;
  tracer_thread_argument.callback_argument = argument;
  tracer_thread_argument.parent_pid = internal_getpid();
  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
  const uptr kTracerStackSize = 2 * 1024 * 1024;
  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);

  // Block the execution of TracerThread until after we have set ptrace
  // permissions.
  tracer_thread_argument.mutex.Lock();

  // Signal handling story.
  // We don't want async signals to be delivered to the tracer thread,
  // so we block all async signals before creating the thread. An async signal
  // handler can temporarily modify errno, which is shared with this thread.
  // We ought to use pthread_sigmask here, because sigprocmask has undefined
  // behavior in multithreaded programs. However, on Linux sigprocmask is
  // equivalent to pthread_sigmask with the exception that pthread_sigmask
  // does not allow blocking some signals used internally in the pthread
  // implementation. We are fine with blocking them here; we are really not
  // going to pthread_cancel the thread.
  // The tracer thread should not raise any synchronous signals. But in case it
  // does, we set up a special handler for sync signals that properly kills the
  // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers
  // in the tracer thread won't interfere with the user program. Double note:
  // if a user does something along the lines of 'kill -11 pid', that can kill
  // the process even if the user has set up their own handler for SEGV.
  // Thing to watch out for: this code should not change the behavior of user
  // code in any observable way. In particular it should not override user
  // signal handlers.
  internal_sigfillset(&blocked_sigset);
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
  CHECK_EQ(rv, 0);
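  // Note: CLONE_UNTRACED should also keep a tracer already attached to this
  // process from being forced onto the new task (it suppresses CLONE_PTRACE).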
  uptr tracer_pid = internal_clone(
      TracerThread, tracer_stack.Bottom(),
      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
      &tracer_thread_argument, nullptr /* parent_tidptr */,
      nullptr /* newtls */, nullptr /* child_tidptr */);
  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
  int local_errno = 0;
  if (internal_iserror(tracer_pid, &local_errno)) {
    VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
    tracer_thread_argument.mutex.Unlock();
  } else {
    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
    // On some systems we have to explicitly declare that we want to be traced
    // by the tracer thread.
    internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
    // Allow the tracer thread to start.
    tracer_thread_argument.mutex.Unlock();
    // NOTE: errno is shared between this thread and the tracer thread.
    // internal_waitpid() may call syscall() which can access/spoil errno,
    // so we can't call it now. Instead we wait for the tracer thread to finish
    // using the spin loop below. The man page for sched_yield() says "In the
    // Linux implementation, sched_yield() always succeeds", so let's hope it
    // does not spoil errno. Note that this spin loop runs only for brief
    // periods before the tracer thread has suspended us and when it starts
    // unblocking threads.
    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) ==
           0)
      sched_yield();
    // Now the tracer thread is about to exit and does not touch errno,
    // wait for it.
    for (;;) {
      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
      if (!internal_iserror(waitpid_status, &local_errno))
        break;
      if (local_errno == EINTR)
        continue;
      VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
              local_errno);
      break;
    }
  }
}
// Platform-specific methods from SuspendedThreadsList.
#if SANITIZER_ANDROID && defined(__arm__)
typedef pt_regs regs_struct;
#define REG_SP ARM_sp

#elif SANITIZER_LINUX && defined(__arm__)
typedef user_regs regs_struct;
#define REG_SP uregs[13]

#elif defined(__i386__) || defined(__x86_64__)
typedef user_regs_struct regs_struct;
#if defined(__i386__)
#define REG_SP esp
#else
#define REG_SP rsp
#endif
#define ARCH_IOVEC_FOR_GETREGSET
// Support ptrace extensions even when compiled without required kernel support
#ifndef NT_X86_XSTATE
#define NT_X86_XSTATE 0x202
#endif
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif
// Compiler may use FP registers to store pointers.
static constexpr uptr kExtraRegs[] = {NT_X86_XSTATE, NT_FPREGSET};

#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
#define REG_SP gpr[PT_R1]

#elif defined(__mips__)
typedef struct user regs_struct;
# if SANITIZER_ANDROID
#  define REG_SP regs[EF_R29]
# else
#  define REG_SP regs[EF_REG29]
# endif

#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#define REG_SP sp
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif SANITIZER_RISCV64
typedef struct user_regs_struct regs_struct;
// sys/ucontext.h already defines REG_SP as 2. Undefine it first.
#undef REG_SP
#define REG_SP sp
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif defined(__s390__)
typedef _user_regs_struct regs_struct;
#define REG_SP gprs[15]
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#else
#error "Unsupported architecture"
#endif  // SANITIZER_ANDROID && defined(__arm__)
tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const {
  CHECK_LT(index, thread_ids_.size());
  return thread_ids_[index];
}

uptr SuspendedThreadsListLinux::ThreadCount() const {
  return thread_ids_.size();
}

bool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const {
  for (uptr i = 0; i < thread_ids_.size(); i++) {
    if (thread_ids_[i] == thread_id) return true;
  }
  return false;
}

void SuspendedThreadsListLinux::Append(tid_t tid) {
  thread_ids_.push_back(tid);
}

PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
    uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
  pid_t tid = GetThreadID(index);
  constexpr uptr uptr_sz = sizeof(uptr);
  int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
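  // With PTRACE_GETREGSET the kernel fills the supplied iovec and updates
  // iov_len to the number of bytes actually written, so a result that nearly
  // fills the buffer may mean the regset was truncated; in that case we grow
  // the buffer and retry.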
  auto append = [&](uptr regset) {
    uptr size = buffer->size();
    // NT_X86_XSTATE requires 64-bit alignment.
    uptr size_up = RoundUpTo(size, 8 / uptr_sz);
    buffer->reserve(Max<uptr>(1024, size_up));
    struct iovec regset_io;
    for (;; buffer->resize(buffer->capacity() * 2)) {
      buffer->resize(buffer->capacity());
      uptr available_bytes = (buffer->size() - size_up) * uptr_sz;
      regset_io.iov_base = buffer->data() + size_up;
      regset_io.iov_len = available_bytes;
      bool fail =
          internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
                                           (void *)regset, (void *)&regset_io),
                           &pterrno);
      if (fail) {
        VReport(1, "Could not get regset %p from thread %d (errno %d).\n",
                (void *)regset, tid, pterrno);
        buffer->resize(size);
        return false;
      }

      // Far enough from the buffer size, no need to resize and repeat.
      if (regset_io.iov_len + 64 < available_bytes)
        break;
    }
    buffer->resize(size_up + RoundUpTo(regset_io.iov_len, uptr_sz) / uptr_sz);
    return true;
  };

  buffer->clear();
  bool fail = !append(NT_PRSTATUS);
  if (!fail) {
    // Accept the first available and do not report errors.
    for (uptr regs : kExtraRegs)
      if (regs && append(regs))
        break;
  }
#else
  buffer->resize(RoundUpTo(sizeof(regs_struct), uptr_sz) / uptr_sz);
  bool fail = internal_iserror(
      internal_ptrace(PTRACE_GETREGS, tid, nullptr, buffer->data()), &pterrno);
  if (fail)
    VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
            pterrno);
#endif
  if (fail) {
    // ESRCH means that the given thread is not suspended or already dead.
    // Therefore it's unsafe to inspect its data (e.g. walk through its stack)
    // and we should notify the caller about this.
    return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
                            : REGISTERS_UNAVAILABLE;
  }

  *sp = reinterpret_cast<regs_struct *>(buffer->data())[0].REG_SP;
  return REGISTERS_AVAILABLE;
}
}  // namespace __sanitizer

#endif  // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
        // || defined(__aarch64__) || defined(__powerpc64__)
        // || defined(__s390__) || defined(__i386__) || defined(__arm__)