record-btrace.c 82 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
7277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068306930703071307230733074307530763077307830793080308130823083308430853086308730883089309030913092309330943095309630973098309931003101310231033104310531063107310831093110311131123113311431153116311731183119312031213122312331243125312631273128312931303131313231333134313531363137313831393140314131423143314431453146314731483149315031513152315331543155315631573158315931603161316231633164316531663167316831693170317131723173317431753176317731783179318031813182318331843185318631873188318931903191319231933194319531963197319831993200320132023203320432053206320732083209321032113212321332143215
  1. /* Branch trace support for GDB, the GNU debugger.
  2. Copyright (C) 2013-2022 Free Software Foundation, Inc.
  3. Contributed by Intel Corp. <markus.t.metzger@intel.com>
  4. This file is part of GDB.
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 3 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  15. #include "defs.h"
  16. #include "record.h"
  17. #include "record-btrace.h"
  18. #include "gdbthread.h"
  19. #include "target.h"
  20. #include "gdbcmd.h"
  21. #include "disasm.h"
  22. #include "observable.h"
  23. #include "cli/cli-utils.h"
  24. #include "source.h"
  25. #include "ui-out.h"
  26. #include "symtab.h"
  27. #include "filenames.h"
  28. #include "regcache.h"
  29. #include "frame-unwind.h"
  30. #include "hashtab.h"
  31. #include "infrun.h"
  32. #include "gdbsupport/event-loop.h"
  33. #include "inf-loop.h"
  34. #include "inferior.h"
  35. #include <algorithm>
  36. #include "gdbarch.h"
  37. #include "cli/cli-style.h"
  38. #include "async-event.h"
  39. #include <forward_list>
/* Meta data describing this target, returned by
   record_btrace_target::info.  The short name is what users type with
   "target record-btrace"; the N_()-marked strings are shown in help
   output and marked for translation.  */

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};
/* The target_ops of record-btrace.

   This target sits at record_stratum on top of the execution target and
   provides the "record btrace" functionality: it answers history queries
   from the recorded branch trace and intercepts execution requests while
   replaying.  The method bodies are defined further down in this file.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  /* Lifetime and connection management.  */
  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  /* Recording control and status.  */
  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  /* History browsing ("record instruction-history" and
     "record function-call-history").  */
  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  /* Replay state queries and control.  */
  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  /* Target accesses that must be filtered while replaying.  */
  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  /* Frame unwinding against the recorded trace.  */
  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  /* Execution control, including reverse/replay stepping.  */
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;

  /* Navigation within the recorded history.  */
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;

  /* Core-file generation hooks; see record_btrace_generating_corefile.  */
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};
/* The single instance of the record-btrace target that gets pushed onto
   the target stack.  */
static record_btrace_target record_btrace_ops;

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds.  CS_AUTO lets GDB detect the cpu, CS_NONE disables
   cpu-specific handling, CS_CPU uses the user-specified cpu below.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  Only meaningful when
   record_btrace_cpu_state is CS_CPU (or CS_NONE, which overwrites the
   vendor; see record_btrace_get_cpu).  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.

   MSG is a printf-style format string, ARGS its arguments.  Output goes
   to gdb_stdlog and is emitted only when record debugging is enabled
   (record_debug != 0).  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	gdb_printf (gdb_stdlog, \
		    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
  167. /* Return the cpu configured by the user. Returns NULL if the cpu was
  168. configured as auto. */
  169. const struct btrace_cpu *
  170. record_btrace_get_cpu (void)
  171. {
  172. switch (record_btrace_cpu_state)
  173. {
  174. case CS_AUTO:
  175. return nullptr;
  176. case CS_NONE:
  177. record_btrace_cpu.vendor = CV_UNKNOWN;
  178. /* Fall through. */
  179. case CS_CPU:
  180. return &record_btrace_cpu;
  181. }
  182. error (_("Internal error: bad record btrace cpu state."));
  183. }
  184. /* Update the branch trace for the current thread and return a pointer to its
  185. thread_info.
  186. Throws an error if there is no thread or no trace. This function never
  187. returns NULL. */
  188. static struct thread_info *
  189. require_btrace_thread (void)
  190. {
  191. DEBUG ("require");
  192. if (inferior_ptid == null_ptid)
  193. error (_("No thread."));
  194. thread_info *tp = inferior_thread ();
  195. validate_registers_access ();
  196. btrace_fetch (tp, record_btrace_get_cpu ());
  197. if (btrace_is_empty (tp))
  198. error (_("No trace."));
  199. return tp;
  200. }
  201. /* Update the branch trace for the current thread and return a pointer to its
  202. branch trace information struct.
  203. Throws an error if there is no thread or no trace. This function never
  204. returns NULL. */
  205. static struct btrace_thread_info *
  206. require_btrace (void)
  207. {
  208. struct thread_info *tp;
  209. tp = require_btrace_thread ();
  210. return &tp->btrace;
  211. }
  212. /* The new thread observer. */
  213. static void
  214. record_btrace_on_new_thread (struct thread_info *tp)
  215. {
  216. /* Ignore this thread if its inferior is not recorded by us. */
  217. target_ops *rec = tp->inf->target_at (record_stratum);
  218. if (rec != &record_btrace_ops)
  219. return;
  220. try
  221. {
  222. btrace_enable (tp, &record_btrace_conf);
  223. }
  224. catch (const gdb_exception_error &error)
  225. {
  226. warning ("%s", error.what ());
  227. }
  228. }
  229. /* Enable automatic tracing of new threads. */
  230. static void
  231. record_btrace_auto_enable (void)
  232. {
  233. DEBUG ("attach thread observer");
  234. gdb::observers::new_thread.attach (record_btrace_on_new_thread,
  235. record_btrace_thread_observer_token,
  236. "record-btrace");
  237. }
  238. /* Disable automatic tracing of new threads. */
  239. static void
  240. record_btrace_auto_disable (void)
  241. {
  242. DEBUG ("detach thread observer");
  243. gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
  244. }
  245. /* The record-btrace async event handler function. */
  246. static void
  247. record_btrace_handle_async_inferior_event (gdb_client_data data)
  248. {
  249. inferior_event_handler (INF_REG_EVENT);
  250. }
  251. /* See record-btrace.h. */
  252. void
  253. record_btrace_push_target (void)
  254. {
  255. const char *format;
  256. record_btrace_auto_enable ();
  257. current_inferior ()->push_target (&record_btrace_ops);
  258. record_btrace_async_inferior_event_handler
  259. = create_async_event_handler (record_btrace_handle_async_inferior_event,
  260. NULL, "record-btrace");
  261. record_btrace_generating_corefile = 0;
  262. format = btrace_format_short_string (record_btrace_conf.format);
  263. gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
  264. }
  265. /* Disable btrace on a set of threads on scope exit. */
  266. struct scoped_btrace_disable
  267. {
  268. scoped_btrace_disable () = default;
  269. DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
  270. ~scoped_btrace_disable ()
  271. {
  272. for (thread_info *tp : m_threads)
  273. btrace_disable (tp);
  274. }
  275. void add_thread (thread_info *thread)
  276. {
  277. m_threads.push_front (thread);
  278. }
  279. void discard ()
  280. {
  281. m_threads.clear ();
  282. }
  283. private:
  284. std::forward_list<thread_info *> m_threads;
  285. };
  286. /* Open target record-btrace. */
  287. static void
  288. record_btrace_target_open (const char *args, int from_tty)
  289. {
  290. /* If we fail to enable btrace for one thread, disable it for the threads for
  291. which it was successfully enabled. */
  292. scoped_btrace_disable btrace_disable;
  293. DEBUG ("open");
  294. record_preopen ();
  295. if (!target_has_execution ())
  296. error (_("The program is not being run."));
  297. for (thread_info *tp : current_inferior ()->non_exited_threads ())
  298. if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
  299. {
  300. btrace_enable (tp, &record_btrace_conf);
  301. btrace_disable.add_thread (tp);
  302. }
  303. record_btrace_push_target ();
  304. btrace_disable.discard ();
  305. }
  306. /* The stop_recording method of target record-btrace. */
  307. void
  308. record_btrace_target::stop_recording ()
  309. {
  310. DEBUG ("stop recording");
  311. record_btrace_auto_disable ();
  312. for (thread_info *tp : current_inferior ()->non_exited_threads ())
  313. if (tp->btrace.target != NULL)
  314. btrace_disable (tp);
  315. }
  316. /* The disconnect method of target record-btrace. */
  317. void
  318. record_btrace_target::disconnect (const char *args,
  319. int from_tty)
  320. {
  321. struct target_ops *beneath = this->beneath ();
  322. /* Do not stop recording, just clean up GDB side. */
  323. current_inferior ()->unpush_target (this);
  324. /* Forward disconnect. */
  325. beneath->disconnect (args, from_tty);
  326. }
  327. /* The close method of target record-btrace. */
  328. void
  329. record_btrace_target::close ()
  330. {
  331. if (record_btrace_async_inferior_event_handler != NULL)
  332. delete_async_event_handler (&record_btrace_async_inferior_event_handler);
  333. /* Make sure automatic recording gets disabled even if we did not stop
  334. recording before closing the record-btrace target. */
  335. record_btrace_auto_disable ();
  336. /* We should have already stopped recording.
  337. Tear down btrace in case we have not. */
  338. for (thread_info *tp : current_inferior ()->non_exited_threads ())
  339. btrace_teardown (tp);
  340. }
  341. /* The async method of target record-btrace. */
  342. void
  343. record_btrace_target::async (int enable)
  344. {
  345. if (enable)
  346. mark_async_event_handler (record_btrace_async_inferior_event_handler);
  347. else
  348. clear_async_event_handler (record_btrace_async_inferior_event_handler);
  349. this->beneath ()->async (enable);
  350. }
  351. /* Adjusts the size and returns a human readable size suffix. */
  352. static const char *
  353. record_btrace_adjust_size (unsigned int *size)
  354. {
  355. unsigned int sz;
  356. sz = *size;
  357. if ((sz & ((1u << 30) - 1)) == 0)
  358. {
  359. *size = sz >> 30;
  360. return "GB";
  361. }
  362. else if ((sz & ((1u << 20) - 1)) == 0)
  363. {
  364. *size = sz >> 20;
  365. return "MB";
  366. }
  367. else if ((sz & ((1u << 10) - 1)) == 0)
  368. {
  369. *size = sz >> 10;
  370. return "kB";
  371. }
  372. else
  373. return "";
  374. }
  375. /* Print a BTS configuration. */
  376. static void
  377. record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
  378. {
  379. const char *suffix;
  380. unsigned int size;
  381. size = conf->size;
  382. if (size > 0)
  383. {
  384. suffix = record_btrace_adjust_size (&size);
  385. gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
  386. }
  387. }
  388. /* Print an Intel Processor Trace configuration. */
  389. static void
  390. record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
  391. {
  392. const char *suffix;
  393. unsigned int size;
  394. size = conf->size;
  395. if (size > 0)
  396. {
  397. suffix = record_btrace_adjust_size (&size);
  398. gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
  399. }
  400. }
  401. /* Print a branch tracing configuration. */
  402. static void
  403. record_btrace_print_conf (const struct btrace_config *conf)
  404. {
  405. gdb_printf (_("Recording format: %s.\n"),
  406. btrace_format_string (conf->format));
  407. switch (conf->format)
  408. {
  409. case BTRACE_FORMAT_NONE:
  410. return;
  411. case BTRACE_FORMAT_BTS:
  412. record_btrace_print_bts_conf (&conf->bts);
  413. return;
  414. case BTRACE_FORMAT_PT:
  415. record_btrace_print_pt_conf (&conf->pt);
  416. return;
  417. }
  418. internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
  419. }
/* The info_record method of target record-btrace.

   Prints the recording configuration and a summary of the recorded trace
   (instruction count, function segment count, gaps) for the current
   thread, plus the replay position if replaying.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  /* Print the configuration before fetching so the user sees it even if
     the fetch below throws.  */
  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The highest call number is found one step back from the end
	 iterator.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  gdb_printf (_("Recorded %u instructions in %u functions (%u gaps) "
		"for thread %s (%s).\n"), insns, calls, gaps,
	      print_thread_id (tp),
	      target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    gdb_printf (_("Replay in progress. At instruction %u.\n"),
		btrace_insn_number (btinfo->replay));
}
  464. /* Print a decode error. */
  465. static void
  466. btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
  467. enum btrace_format format)
  468. {
  469. const char *errstr = btrace_decode_error (format, errcode);
  470. uiout->text (_("["));
  471. /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
  472. if (!(format == BTRACE_FORMAT_PT && errcode > 0))
  473. {
  474. uiout->text (_("decode error ("));
  475. uiout->field_signed ("errcode", errcode);
  476. uiout->text (_("): "));
  477. }
  478. uiout->text (errstr);
  479. uiout->text (_("]\n"));
  480. }
/* A range of source lines.  An empty range is represented by
   END <= BEGIN (see btrace_line_range_is_empty).  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
  491. /* Construct a line range. */
  492. static struct btrace_line_range
  493. btrace_mk_line_range (struct symtab *symtab, int begin, int end)
  494. {
  495. struct btrace_line_range range;
  496. range.symtab = symtab;
  497. range.begin = begin;
  498. range.end = end;
  499. return range;
  500. }
  501. /* Add a line to a line range. */
  502. static struct btrace_line_range
  503. btrace_line_range_add (struct btrace_line_range range, int line)
  504. {
  505. if (range.end <= range.begin)
  506. {
  507. /* This is the first entry. */
  508. range.begin = line;
  509. range.end = line + 1;
  510. }
  511. else if (line < range.begin)
  512. range.begin = line;
  513. else if (range.end < line)
  514. range.end = line;
  515. return range;
  516. }
  517. /* Return non-zero if RANGE is empty, zero otherwise. */
  518. static int
  519. btrace_line_range_is_empty (struct btrace_line_range range)
  520. {
  521. return range.end <= range.begin;
  522. }
  523. /* Return non-zero if LHS contains RHS, zero otherwise. */
  524. static int
  525. btrace_line_range_contains_range (struct btrace_line_range lhs,
  526. struct btrace_line_range rhs)
  527. {
  528. return ((lhs.symtab == rhs.symtab)
  529. && (lhs.begin <= rhs.begin)
  530. && (rhs.end <= lhs.end));
  531. }
/* Find the line range associated with PC: all is_stmt line-table entries
   of PC's symtab whose address equals PC exactly.  Returns an empty range
   (possibly with a NULL symtab) if nothing matches.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = symtab->linetable ();
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  /* NOTE(review): the loop deliberately stops at nlines - 1, skipping the
     final entry — presumably the end-of-sequence marker; confirm against
     the linetable layout.  */
  for (i = 0; i < nlines - 1; i++)
    {
      /* The test of is_stmt here was added when the is_stmt field was
	 introduced to the 'struct linetable_entry' structure.  This
	 ensured that this loop maintained the same behaviour as before we
	 introduced is_stmt.  That said, it might be that we would be
	 better off not checking is_stmt here, this would lead to us
	 possibly adding more line numbers to the range.  At the time this
	 change was made I was unsure how to test this so chose to go with
	 maintaining the existing experience.  */
      if ((lines[i].pc == pc) && (lines[i].line != 0)
	  && (lines[i].is_stmt == 1))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST hold the ui-out emitters for the last
   printed source line and the instruction list belonging to it.  For each
   new source line we close the currently open emitters and open new ones.
   If the source line range in LINES is not empty, this function leaves the
   emitters for the last printed source line engaged so the caller can add
   instructions to them.  */
static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      /* Close the inner instruction list before replacing the enclosing
	 tuple; emplace on an engaged optional destroys (and thereby
	 closes) the previous emitter first.  */
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      /* Open the list that receives this line's instructions.  */
      asm_list->emplace (uiout, "line_asm_insn");
    }
}
/* Disassemble the recorded instruction trace in [BEGIN, END) to UIOUT,
   interleaving source lines when DISASSEMBLY_SOURCE is set in FLAGS and
   printing decode errors for gaps in the trace.  */
static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  /* Always mark speculatively executed instructions in the output.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  /* Emitters for the current source-line tuple and its instruction list;
     disengaged until the first instruction is printed.  */
  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  /* New source lines: print them and remember them so we
		     don't repeat them for later instructions.  */
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
				      flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (&dinsn, flags);
	}
    }
}
/* The insn_history method of target record-btrace.

   Print SIZE instructions around the current position (first request) or
   continuing from the previous request; SIZE < 0 extends backwards.  */
void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previous request: extend backwards for
	 SIZE < 0, forwards otherwise.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	gdb_printf (_("At the start of the branch trace record.\n"));
      else
	gdb_printf (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next relative request.  */
  btrace_set_insn_history (btinfo, &begin, &end);
}
/* The insn_history_range method of target record-btrace.

   Print the recorded instructions numbered [FROM, TO] (both inclusive);
   an out-of-range TO is silently truncated to the end of the trace.  */
void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  Instruction numbers are unsigned int; if the
     64-bit arguments don't round-trip, they were out of range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}
  770. /* The insn_history_from method of target record-btrace. */
  771. void
  772. record_btrace_target::insn_history_from (ULONGEST from, int size,
  773. gdb_disassembly_flags flags)
  774. {
  775. ULONGEST begin, end, context;
  776. context = abs (size);
  777. if (context == 0)
  778. error (_("Bad record instruction-history-size."));
  779. if (size < 0)
  780. {
  781. end = from;
  782. if (from < context)
  783. begin = 0;
  784. else
  785. begin = from - context + 1;
  786. }
  787. else
  788. {
  789. begin = from;
  790. end = from + context - 1;
  791. /* Check for wrap-around. */
  792. if (end < begin)
  793. end = ULONGEST_MAX;
  794. }
  795. insn_history_range (begin, end, flags);
  796. }
  797. /* Print the instruction number range for a function call history line. */
  798. static void
  799. btrace_call_history_insn_range (struct ui_out *uiout,
  800. const struct btrace_function *bfun)
  801. {
  802. unsigned int begin, end, size;
  803. size = bfun->insn.size ();
  804. gdb_assert (size > 0);
  805. begin = bfun->insn_offset;
  806. end = begin + size - 1;
  807. uiout->field_unsigned ("insn begin", begin);
  808. uiout->text (",");
  809. uiout->field_unsigned ("insn end", end);
  810. }
  811. /* Compute the lowest and highest source line for the instructions in BFUN
  812. and return them in PBEGIN and PEND.
  813. Ignore instructions that can't be mapped to BFUN, e.g. instructions that
  814. result from inlining or macro expansion. */
  815. static void
  816. btrace_compute_src_line_range (const struct btrace_function *bfun,
  817. int *pbegin, int *pend)
  818. {
  819. struct symtab *symtab;
  820. struct symbol *sym;
  821. int begin, end;
  822. begin = INT_MAX;
  823. end = INT_MIN;
  824. sym = bfun->sym;
  825. if (sym == NULL)
  826. goto out;
  827. symtab = symbol_symtab (sym);
  828. for (const btrace_insn &insn : bfun->insn)
  829. {
  830. struct symtab_and_line sal;
  831. sal = find_pc_line (insn.pc, 0);
  832. if (sal.symtab != symtab || sal.line == 0)
  833. continue;
  834. begin = std::min (begin, sal.line);
  835. end = std::max (end, sal.line);
  836. }
  837. out:
  838. *pbegin = begin;
  839. *pend = end;
  840. }
  841. /* Print the source line information for a function call history line. */
  842. static void
  843. btrace_call_history_src_line (struct ui_out *uiout,
  844. const struct btrace_function *bfun)
  845. {
  846. struct symbol *sym;
  847. int begin, end;
  848. sym = bfun->sym;
  849. if (sym == NULL)
  850. return;
  851. uiout->field_string ("file",
  852. symtab_to_filename_for_display (symbol_symtab (sym)),
  853. file_name_style.style ());
  854. btrace_compute_src_line_range (bfun, &begin, &end);
  855. if (end < begin)
  856. return;
  857. uiout->text (":");
  858. uiout->field_signed ("min line", begin);
  859. if (end == begin)
  860. return;
  861. uiout->text (",");
  862. uiout->field_signed ("max line", end);
  863. }
  864. /* Get the name of a branch trace function. */
  865. static const char *
  866. btrace_get_bfun_name (const struct btrace_function *bfun)
  867. {
  868. struct minimal_symbol *msym;
  869. struct symbol *sym;
  870. if (bfun == NULL)
  871. return "??";
  872. msym = bfun->msym;
  873. sym = bfun->sym;
  874. if (sym != NULL)
  875. return sym->print_name ();
  876. else if (msym != NULL)
  877. return msym->print_name ();
  878. else
  879. return "??";
  880. }
/* Print a section of the recorded function call trace in [BEGIN, END)
   to UIOUT, one line per function segment; gaps in the trace are printed
   as decode errors.  */
static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by call depth; BTINFO->LEVEL offsets BFUN->LEVEL so
	     indentation starts at zero.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text (" ");
	}

      /* Prefer the full symbol name; "??" is suppressed for MI.  */
      if (sym != NULL)
	uiout->field_string ("function", sym->print_name (),
			     function_name_style.style ());
      else if (msym != NULL)
	uiout->field_string ("function", msym->print_name (),
			     function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}
/* The call_history method of target record-btrace.

   Print SIZE function segments around the current position (first
   request) or continuing from the previous request; SIZE < 0 extends
   backwards.  */
void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): this tuple is labelled "insn history" although this is
     the function-call history; call_history_range uses "func history".
     Confirm whether any MI consumer relies on the label before unifying.  */
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previous request: extend backwards for
	 SIZE < 0, forwards otherwise.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	gdb_printf (_("At the start of the branch trace record.\n"));
      else
	gdb_printf (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next relative request.  */
  btrace_set_call_history (btinfo, &begin, &end);
}
/* The call_history_range method of target record-btrace.

   Print the function segments numbered [FROM, TO] (both inclusive); an
   out-of-range TO is silently truncated to the end of the trace.  */
void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  Call numbers are unsigned int; if the 64-bit
     arguments don't round-trip, they were out of range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}
  1055. /* The call_history_from method of target record-btrace. */
  1056. void
  1057. record_btrace_target::call_history_from (ULONGEST from, int size,
  1058. record_print_flags flags)
  1059. {
  1060. ULONGEST begin, end, context;
  1061. context = abs (size);
  1062. if (context == 0)
  1063. error (_("Bad record function-call-history-size."));
  1064. if (size < 0)
  1065. {
  1066. end = from;
  1067. if (from < context)
  1068. begin = 0;
  1069. else
  1070. begin = from - context + 1;
  1071. }
  1072. else
  1073. {
  1074. begin = from;
  1075. end = from + context - 1;
  1076. /* Check for wrap-around. */
  1077. if (end < begin)
  1078. end = ULONGEST_MAX;
  1079. }
  1080. call_history_range ( begin, end, flags);
  1081. }
  1082. /* The record_method method of target record-btrace. */
  1083. enum record_method
  1084. record_btrace_target::record_method (ptid_t ptid)
  1085. {
  1086. process_stratum_target *proc_target = current_inferior ()->process_target ();
  1087. thread_info *const tp = find_thread_ptid (proc_target, ptid);
  1088. if (tp == NULL)
  1089. error (_("No thread."));
  1090. if (tp->btrace.target == NULL)
  1091. return RECORD_METHOD_NONE;
  1092. return RECORD_METHOD_BTRACE;
  1093. }
  1094. /* The record_is_replaying method of target record-btrace. */
  1095. bool
  1096. record_btrace_target::record_is_replaying (ptid_t ptid)
  1097. {
  1098. process_stratum_target *proc_target = current_inferior ()->process_target ();
  1099. for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
  1100. if (btrace_is_replaying (tp))
  1101. return true;
  1102. return false;
  1103. }
  1104. /* The record_will_replay method of target record-btrace. */
  1105. bool
  1106. record_btrace_target::record_will_replay (ptid_t ptid, int dir)
  1107. {
  1108. return dir == EXEC_REVERSE || record_is_replaying (ptid);
  1109. }
/* The xfer_partial method of target record-btrace.

   During read-only replay, memory writes are rejected and reads are
   restricted to read-only sections; everything else is forwarded to the
   target beneath.  */
enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    const struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_section_flags (section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section and
		       fall through to forward the (truncated) read.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}
  1155. /* The insert_breakpoint method of target record-btrace. */
  1156. int
  1157. record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
  1158. struct bp_target_info *bp_tgt)
  1159. {
  1160. const char *old;
  1161. int ret;
  1162. /* Inserting breakpoints requires accessing memory. Allow it for the
  1163. duration of this function. */
  1164. old = replay_memory_access;
  1165. replay_memory_access = replay_memory_access_read_write;
  1166. ret = 0;
  1167. try
  1168. {
  1169. ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
  1170. }
  1171. catch (const gdb_exception &except)
  1172. {
  1173. replay_memory_access = old;
  1174. throw;
  1175. }
  1176. replay_memory_access = old;
  1177. return ret;
  1178. }
  1179. /* The remove_breakpoint method of target record-btrace. */
  1180. int
  1181. record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
  1182. struct bp_target_info *bp_tgt,
  1183. enum remove_bp_reason reason)
  1184. {
  1185. const char *old;
  1186. int ret;
  1187. /* Removing breakpoints requires accessing memory. Allow it for the
  1188. duration of this function. */
  1189. old = replay_memory_access;
  1190. replay_memory_access = replay_memory_access_read_write;
  1191. ret = 0;
  1192. try
  1193. {
  1194. ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
  1195. }
  1196. catch (const gdb_exception &except)
  1197. {
  1198. replay_memory_access = old;
  1199. throw;
  1200. }
  1201. replay_memory_access = old;
  1202. return ret;
  1203. }
  1204. /* The fetch_registers method of target record-btrace. */
  1205. void
  1206. record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
  1207. {
  1208. btrace_insn_iterator *replay = nullptr;
  1209. /* Thread-db may ask for a thread's registers before GDB knows about the
  1210. thread. We forward the request to the target beneath in this
  1211. case. */
  1212. thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
  1213. if (tp != nullptr)
  1214. replay = tp->btrace.replay;
  1215. if (replay != nullptr && !record_btrace_generating_corefile)
  1216. {
  1217. const struct btrace_insn *insn;
  1218. struct gdbarch *gdbarch;
  1219. int pcreg;
  1220. gdbarch = regcache->arch ();
  1221. pcreg = gdbarch_pc_regnum (gdbarch);
  1222. if (pcreg < 0)
  1223. return;
  1224. /* We can only provide the PC register. */
  1225. if (regno >= 0 && regno != pcreg)
  1226. return;
  1227. insn = btrace_insn_get (replay);
  1228. gdb_assert (insn != NULL);
  1229. regcache->raw_supply (regno, &insn->pc);
  1230. }
  1231. else
  1232. this->beneath ()->fetch_registers (regcache, regno);
  1233. }
  1234. /* The store_registers method of target record-btrace. */
  1235. void
  1236. record_btrace_target::store_registers (struct regcache *regcache, int regno)
  1237. {
  1238. if (!record_btrace_generating_corefile
  1239. && record_is_replaying (regcache->ptid ()))
  1240. error (_("Cannot write registers while replaying."));
  1241. gdb_assert (may_write_registers);
  1242. this->beneath ()->store_registers (regcache, regno);
  1243. }
  1244. /* The prepare_to_store method of target record-btrace. */
  1245. void
  1246. record_btrace_target::prepare_to_store (struct regcache *regcache)
  1247. {
  1248. if (!record_btrace_generating_corefile
  1249. && record_is_replaying (regcache->ptid ()))
  1250. return;
  1251. this->beneath ()->prepare_to_store (regcache);
  1252. }
/* The branch trace frame cache.

   Associates a frame with its thread and the branch trace function
   segment it represents; entries live in the BFCACHE hash table and are
   removed again in record_btrace_frame_dealloc_cache.  */
struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
/* A struct btrace_frame_cache hash table, keyed by the cached frame
   pointer (bfcache_hash and bfcache_eq hash and compare CACHE->FRAME).  */
static htab_t bfcache;
  1265. /* hash_f for htab_create_alloc of bfcache. */
  1266. static hashval_t
  1267. bfcache_hash (const void *arg)
  1268. {
  1269. const struct btrace_frame_cache *cache
  1270. = (const struct btrace_frame_cache *) arg;
  1271. return htab_hash_pointer (cache->frame);
  1272. }
  1273. /* eq_f for htab_create_alloc of bfcache. */
  1274. static int
  1275. bfcache_eq (const void *arg1, const void *arg2)
  1276. {
  1277. const struct btrace_frame_cache *cache1
  1278. = (const struct btrace_frame_cache *) arg1;
  1279. const struct btrace_frame_cache *cache2
  1280. = (const struct btrace_frame_cache *) arg2;
  1281. return cache1->frame == cache2->frame;
  1282. }
  1283. /* Create a new btrace frame cache. */
  1284. static struct btrace_frame_cache *
  1285. bfcache_new (struct frame_info *frame)
  1286. {
  1287. struct btrace_frame_cache *cache;
  1288. void **slot;
  1289. cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  1290. cache->frame = frame;
  1291. slot = htab_find_slot (bfcache, cache, INSERT);
  1292. gdb_assert (*slot == NULL);
  1293. *slot = cache;
  1294. return cache;
  1295. }
  1296. /* Extract the branch trace function from a branch trace frame. */
  1297. static const struct btrace_function *
  1298. btrace_get_frame_function (struct frame_info *frame)
  1299. {
  1300. const struct btrace_frame_cache *cache;
  1301. struct btrace_frame_cache pattern;
  1302. void **slot;
  1303. pattern.frame = frame;
  1304. slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  1305. if (slot == NULL)
  1306. return NULL;
  1307. cache = (const struct btrace_frame_cache *) *slot;
  1308. return cache->bfun;
  1309. }
  1310. /* Implement stop_reason method for record_btrace_frame_unwind. */
  1311. static enum unwind_stop_reason
  1312. record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
  1313. void **this_cache)
  1314. {
  1315. const struct btrace_frame_cache *cache;
  1316. const struct btrace_function *bfun;
  1317. cache = (const struct btrace_frame_cache *) *this_cache;
  1318. bfun = cache->bfun;
  1319. gdb_assert (bfun != NULL);
  1320. if (bfun->up == 0)
  1321. return UNWIND_UNAVAILABLE;
  1322. return UNWIND_NO_REASON;
  1323. }
/* Implement this_id method for record_btrace_frame_unwind.

   The id is built from the frame's function start address and, as the
   special address, the number of the first segment of this function
   instance, so every segment of the same instance yields the same id.  */
static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk the PREV links back to the first segment of this function
     instance.  */
  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  /* The stack itself is not available in record history.  */
  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound; it is reconstructed from the recorded
   instructions of the caller segment.  All other registers are reported
   as unavailable.  */
static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    /* The up link is a return: the caller resumes at its segment's first
       recorded instruction.  */
    pc = caller->insn.front ().pc;
  else
    {
      /* The up link is a call: the return address is the instruction
	 following the caller's last recorded instruction.  */
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the innermost frame while replaying and any outer frame whose
   callee frame was claimed by this unwinder (tail-calls are left for
   record_btrace_tailcall_frame_sniffer).  */
static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      /* Outer frame: follow the callee's up link, unless it is a
	 tail-call link.  */
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claim THIS_FRAME only when the callee frame below was entered via a tail
   call (BFUN_UP_LINKS_TO_TAILCALL); the normal sniffer above handles all
   other cases.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  /* A tailcall frame is never the innermost frame.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
/* Implement the dealloc_cache method for the btrace frame unwinders.
   Remove the cache entry from the global BFCACHE hash table.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  /* The lookup with NO_INSERT only asserts that the entry is present
     before we remove it.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  "record-btrace",
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,		/* unwind_data  */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
/* Unwinder for frames synthesized for tail calls recorded in the branch
   trace.  Shares all methods with record_btrace_frame_unwind except for the
   sniffer, which only accepts tail-call frames.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  "record-btrace tailcall",
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,		/* unwind_data  */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
/* Implement the get_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}
/* Implement the get_tailcall_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}
  1506. /* Return a human-readable string for FLAG. */
  1507. static const char *
  1508. btrace_thread_flag_to_str (btrace_thread_flags flag)
  1509. {
  1510. switch (flag)
  1511. {
  1512. case BTHR_STEP:
  1513. return "step";
  1514. case BTHR_RSTEP:
  1515. return "reverse-step";
  1516. case BTHR_CONT:
  1517. return "cont";
  1518. case BTHR_RCONT:
  1519. return "reverse-cont";
  1520. case BTHR_STOP:
  1521. return "stop";
  1522. }
  1523. return "<invalid>";
  1524. }
/* Indicate that TP should be resumed according to FLAG.

   This only records the intent in TP's btrace flags; the actual stepping
   happens later in record_btrace_target::wait.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str (), flag,
	 btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
/* Get the current frame for TP.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  process_stratum_target *proc_target = tp->inf->process_target ();

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  bool executing = tp->executing ();
  set_executing (proc_target, inferior_ptid, false);

  /* Restore the previous execution state even if get_frame_id throws.  */
  SCOPE_EXIT
    {
      set_executing (proc_target, inferior_ptid, executing);
    };

  return get_frame_id (get_current_frame ());
}
/* Start replaying a thread.

   Returns the new replay iterator (also stored in TP's btrace info), or NULL
   if TP has no trace.  On error, any partially-installed replay state is
   rolled back and the exception is re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detects steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  try
    {
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_thread (tp);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  catch (const gdb_exception &except)
    {
      /* Undo: free the iterator (REPLAY may alias BTINFO->replay or still be
	 a local not yet installed; freeing BTINFO->replay covers the
	 installed case, and BTINFO->replay is NULL otherwise).  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_thread (tp);

      throw;
    }

  return replay;
}
  1626. /* Stop replaying a thread. */
  1627. static void
  1628. record_btrace_stop_replaying (struct thread_info *tp)
  1629. {
  1630. struct btrace_thread_info *btinfo;
  1631. btinfo = &tp->btrace;
  1632. xfree (btinfo->replay);
  1633. btinfo->replay = NULL;
  1634. /* Make sure we're not leaving any stale registers. */
  1635. registers_changed_thread (tp);
  1636. }
/* Stop replaying TP if it is at the end of its execution history.

   A thread replaying at the very end of its trace is logically "live" again,
   so replay mode can be left transparently.  */

static void
record_btrace_stop_replaying_at_end (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return;

  btrace_insn_end (&end, btinfo);

  if (btrace_insn_cmp (replay, &end) == 0)
    record_btrace_stop_replaying (tp);
}
/* The resume method of target record-btrace.  */

void
record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
{
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", ptid.to_string ().c_str (),
	 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = ::execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      this->beneath ()->resume (ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  FLAG is for the
     stepping thread, CFLAG for threads that merely continue.  */
  if (::execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  process_stratum_target *proc_target = current_inferior ()->process_target ();

  if (!target_is_non_stop_p ())
    {
      gdb_assert (inferior_ptid.matches (ptid));

      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	{
	  if (tp->ptid.matches (inferior_ptid))
	    record_btrace_resume_thread (tp, flag);
	  else
	    record_btrace_resume_thread (tp, cflag);
	}
    }
  else
    {
      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
/* Cancel resuming TP.

   Clears any pending move/stop request and leaves replay mode if TP is
   already at the end of its execution history.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  btrace_thread_flags flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %s (%s): %x (%s)",
	 print_thread_id (tp),
	 tp->ptid.to_string ().c_str (), flags.raw (),
	 btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}
  1726. /* Return a target_waitstatus indicating that we ran out of history. */
  1727. static struct target_waitstatus
  1728. btrace_step_no_history (void)
  1729. {
  1730. struct target_waitstatus status;
  1731. status.set_no_history ();
  1732. return status;
  1733. }
  1734. /* Return a target_waitstatus indicating that a step finished. */
  1735. static struct target_waitstatus
  1736. btrace_step_stopped (void)
  1737. {
  1738. struct target_waitstatus status;
  1739. status.set_stopped (GDB_SIGNAL_TRAP);
  1740. return status;
  1741. }
  1742. /* Return a target_waitstatus indicating that a thread was stopped as
  1743. requested. */
  1744. static struct target_waitstatus
  1745. btrace_step_stopped_on_request (void)
  1746. {
  1747. struct target_waitstatus status;
  1748. status.set_stopped (GDB_SIGNAL_0);
  1749. return status;
  1750. }
  1751. /* Return a target_waitstatus indicating a spurious stop. */
  1752. static struct target_waitstatus
  1753. btrace_step_spurious (void)
  1754. {
  1755. struct target_waitstatus status;
  1756. status.set_spurious ();
  1757. return status;
  1758. }
  1759. /* Return a target_waitstatus indicating that the thread was not resumed. */
  1760. static struct target_waitstatus
  1761. btrace_step_no_resumed (void)
  1762. {
  1763. struct target_waitstatus status;
  1764. status.set_no_resumed ();
  1765. return status;
  1766. }
  1767. /* Return a target_waitstatus indicating that we should wait again. */
  1768. static struct target_waitstatus
  1769. btrace_step_again (void)
  1770. {
  1771. struct target_waitstatus status;
  1772. status.set_ignore ();
  1773. return status;
  1774. }
  1775. /* Clear the record histories. */
  1776. static void
  1777. record_btrace_clear_histories (struct btrace_thread_info *btinfo)
  1778. {
  1779. xfree (btinfo->insn_history);
  1780. xfree (btinfo->call_history);
  1781. btinfo->insn_history = NULL;
  1782. btinfo->call_history = NULL;
  1783. }
/* Check whether TP's current replay position is at a breakpoint.

   Returns non-zero iff TP is replaying, the replay position holds an actual
   instruction (not a gap), and a breakpoint is set at that PC; also updates
   TP's recorded stop reason.  */

static int
record_btrace_replay_at_breakpoint (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return 0;

  insn = btrace_insn_get (replay);
  if (insn == NULL)
    return 0;

  return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
					     &btinfo->stop_reason);
}
/* Step one instruction in forward direction.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	{
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
/* Step one instruction in backward direction.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* NOTE(review): record_btrace_start_replaying returns NULL for an empty
     trace; the caller (record_btrace_step_thread) appears to rule that out
     via btrace_is_empty before stepping — confirm before relying on it.  */

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	{
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
/* Step a single thread.

   Consumes TP's pending move/stop flags and performs at most one
   instruction step.  Continue requests that made progress re-arm the flags
   and return TARGET_WAITKIND_IGNORE so the wait loop calls us again.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  btrace_thread_flags flags;

  btinfo = &tp->btrace;

  /* Consume the pending request.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 tp->ptid.to_string ().c_str (), flags.raw (),
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind () != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind () != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind () != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Re-arm so the wait loop keeps moving this thread.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind () != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind () == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
  1928. /* Announce further events if necessary. */
  1929. static void
  1930. record_btrace_maybe_mark_async_event
  1931. (const std::vector<thread_info *> &moving,
  1932. const std::vector<thread_info *> &no_history)
  1933. {
  1934. bool more_moving = !moving.empty ();
  1935. bool more_no_history = !no_history.empty ();;
  1936. if (!more_moving && !more_no_history)
  1937. return;
  1938. if (more_moving)
  1939. DEBUG ("movers pending");
  1940. if (more_no_history)
  1941. DEBUG ("no-history pending");
  1942. mark_async_event_handler (record_btrace_async_inferior_event_handler);
  1943. }
/* The wait method of target record-btrace.  */

ptid_t
record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
			    target_wait_flags options)
{
  std::vector<thread_info *> moving;
  std::vector<thread_info *> no_history;

  /* Clear this, if needed we'll re-mark it below.  */
  clear_async_event_handler (record_btrace_async_inferior_event_handler);

  DEBUG ("wait %s (0x%x)", ptid.to_string ().c_str (),
	 (unsigned) options);

  /* As long as we're not replaying, just forward the request.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      return this->beneath ()->wait (ptid, status, options);
    }

  /* Keep a work list of moving threads.  */
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
      moving.push_back (tp);

  if (moving.empty ())
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", null_ptid.to_string ().c_str (),
	     status->to_string ().c_str ());

      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  struct thread_info *eventing = NULL;
  while ((eventing == NULL) && !moving.empty ())
    {
      for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
	{
	  thread_info *tp = moving[ix];

	  *status = record_btrace_step_thread (tp);

	  switch (status->kind ())
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread made progress; step it again on the next pass.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      no_history.push_back (ordered_remove (moving, ix));
	      break;

	    default:
	      eventing = unordered_remove (moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!no_history.empty ());

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = unordered_remove (no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    {
      for (thread_info *tp : current_inferior ()->non_exited_threads ())
	record_btrace_cancel_resume (tp);
    }

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_thread (eventing);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 eventing->ptid.to_string ().c_str (),
	 status->to_string ().c_str ());

  return eventing->ptid;
}
/* The stop method of target record-btrace.  */

void
record_btrace_target::stop (ptid_t ptid)
{
  DEBUG ("stop %s", ptid.to_string ().c_str ());

  /* As long as we're not replaying, just forward the request.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      this->beneath ()->stop (ptid);
    }
  else
    {
      process_stratum_target *proc_target
	= current_inferior ()->process_target ();

      /* While replaying, convert the stop request into a pending BTHR_STOP
	 that the wait method will report.  */
      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	{
	  tp->btrace.flags &= ~BTHR_MOVE;
	  tp->btrace.flags |= BTHR_STOP;
	}
    }
}
/* The can_execute_reverse method of target record-btrace.  */

bool
record_btrace_target::can_execute_reverse ()
{
  return true;
}
  2072. /* The stopped_by_sw_breakpoint method of target record-btrace. */
  2073. bool
  2074. record_btrace_target::stopped_by_sw_breakpoint ()
  2075. {
  2076. if (record_is_replaying (minus_one_ptid))
  2077. {
  2078. struct thread_info *tp = inferior_thread ();
  2079. return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
  2080. }
  2081. return this->beneath ()->stopped_by_sw_breakpoint ();
  2082. }
  2083. /* The supports_stopped_by_sw_breakpoint method of target
  2084. record-btrace. */
  2085. bool
  2086. record_btrace_target::supports_stopped_by_sw_breakpoint ()
  2087. {
  2088. if (record_is_replaying (minus_one_ptid))
  2089. return true;
  2090. return this->beneath ()->supports_stopped_by_sw_breakpoint ();
  2091. }
  2092. /* The stopped_by_sw_breakpoint method of target record-btrace. */
  2093. bool
  2094. record_btrace_target::stopped_by_hw_breakpoint ()
  2095. {
  2096. if (record_is_replaying (minus_one_ptid))
  2097. {
  2098. struct thread_info *tp = inferior_thread ();
  2099. return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
  2100. }
  2101. return this->beneath ()->stopped_by_hw_breakpoint ();
  2102. }
  2103. /* The supports_stopped_by_hw_breakpoint method of target
  2104. record-btrace. */
  2105. bool
  2106. record_btrace_target::supports_stopped_by_hw_breakpoint ()
  2107. {
  2108. if (record_is_replaying (minus_one_ptid))
  2109. return true;
  2110. return this->beneath ()->supports_stopped_by_hw_breakpoint ();
  2111. }
  2112. /* The update_thread_list method of target record-btrace. */
  2113. void
  2114. record_btrace_target::update_thread_list ()
  2115. {
  2116. /* We don't add or remove threads during replay. */
  2117. if (record_is_replaying (minus_one_ptid))
  2118. return;
  2119. /* Forward the request. */
  2120. this->beneath ()->update_thread_list ();
  2121. }
  2122. /* The thread_alive method of target record-btrace. */
  2123. bool
  2124. record_btrace_target::thread_alive (ptid_t ptid)
  2125. {
  2126. /* We don't add or remove threads during replay. */
  2127. if (record_is_replaying (minus_one_ptid))
  2128. return true;
  2129. /* Forward the request. */
  2130. return this->beneath ()->thread_alive (ptid);
  2131. }
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Clears the browsing histories, updates the stop PC, and re-prints the
   current frame so the user sees the new position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; nothing to do.  */
	return;

      *btinfo->replay = *it;
      registers_changed_thread (tp);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  inferior_thread ()->set_stop_pc (regcache_read_pc (get_current_regcache ()));
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
/* The goto_record_begin method of target record-btrace.  */

void
record_btrace_target::goto_record_begin ()
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);

  /* Skip gaps at the beginning of the trace.  */
  while (btrace_insn_get (&begin) == NULL)
    {
      unsigned int steps;

      steps = btrace_insn_next (&begin, 1);
      if (steps == 0)
	error (_("No trace."));
    }

  record_btrace_set_replay (tp, &begin);
}
  2174. /* The goto_record_end method of target record-btrace. */
  2175. void
  2176. record_btrace_target::goto_record_end ()
  2177. {
  2178. struct thread_info *tp;
  2179. tp = require_btrace_thread ();
  2180. record_btrace_set_replay (tp, NULL);
  2181. }
/* The goto_record method of target record-btrace.

   INSN is the user-supplied instruction number to jump to.  */

void
record_btrace_target::goto_record (ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: the iterator API only takes an unsigned int.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);

  /* Check if the instruction could not be found or is a gap.  */
  if (found == 0 || btrace_insn_get (&it) == NULL)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
  2201. /* The record_stop_replaying method of target record-btrace. */
  2202. void
  2203. record_btrace_target::record_stop_replaying ()
  2204. {
  2205. for (thread_info *tp : current_inferior ()->non_exited_threads ())
  2206. record_btrace_stop_replaying (tp);
  2207. }
/* The execution_direction target method.  Returns the direction stored by
   the last resume call.  */

enum exec_direction_kind
record_btrace_target::execution_direction ()
{
  return record_btrace_resume_exec_dir;
}
/* The prepare_to_generate_core target method.  */

void
record_btrace_target::prepare_to_generate_core ()
{
  record_btrace_generating_corefile = 1;
}
/* The done_generating_core target method.  */

void
record_btrace_target::done_generating_core ()
{
  record_btrace_generating_corefile = 0;
}
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Reset the format so a failed attempt doesn't linger.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
  2243. /* Start recording in Intel Processor Trace format. */
  2244. static void
  2245. cmd_record_btrace_pt_start (const char *args, int from_tty)
  2246. {
  2247. if (args != NULL && *args != 0)
  2248. error (_("Invalid argument."));
  2249. record_btrace_conf.format = BTRACE_FORMAT_PT;
  2250. try
  2251. {
  2252. execute_command ("target record-btrace", from_tty);
  2253. }
  2254. catch (const gdb_exception &exception)
  2255. {
  2256. record_btrace_conf.format = BTRACE_FORMAT_NONE;
  2257. throw;
  2258. }
  2259. }
  2260. /* Alias for "target record". */
  2261. static void
  2262. cmd_record_btrace_start (const char *args, int from_tty)
  2263. {
  2264. if (args != NULL && *args != 0)
  2265. error (_("Invalid argument."));
  2266. record_btrace_conf.format = BTRACE_FORMAT_PT;
  2267. try
  2268. {
  2269. execute_command ("target record-btrace", from_tty);
  2270. }
  2271. catch (const gdb_exception &exception)
  2272. {
  2273. record_btrace_conf.format = BTRACE_FORMAT_BTS;
  2274. try
  2275. {
  2276. execute_command ("target record-btrace", from_tty);
  2277. }
  2278. catch (const gdb_exception &ex)
  2279. {
  2280. record_btrace_conf.format = BTRACE_FORMAT_NONE;
  2281. throw;
  2282. }
  2283. }
  2284. }
  2285. /* The "show record btrace replay-memory-access" command. */
  2286. static void
  2287. cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
  2288. struct cmd_list_element *c, const char *value)
  2289. {
  2290. gdb_printf (file, _("Replay memory access is %s.\n"),
  2291. replay_memory_access);
  2292. }
  2293. /* The "set record btrace cpu none" command. */
  2294. static void
  2295. cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
  2296. {
  2297. if (args != nullptr && *args != 0)
  2298. error (_("Trailing junk: '%s'."), args);
  2299. record_btrace_cpu_state = CS_NONE;
  2300. }
  2301. /* The "set record btrace cpu auto" command. */
  2302. static void
  2303. cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
  2304. {
  2305. if (args != nullptr && *args != 0)
  2306. error (_("Trailing junk: '%s'."), args);
  2307. record_btrace_cpu_state = CS_AUTO;
  2308. }
/* The "set record btrace cpu" command.

   Parses ARGS as "intel: FAMILY/MODEL[/STEPPING]" and stores the result
   in the record_btrace_cpu global; errors on any malformed input.  */

static void
cmd_set_record_btrace_cpu (const char *args, int from_tty)
{
  if (args == nullptr)
    args = "";

  /* We use a hard-coded vendor string for now.  */
  unsigned int family, model, stepping;
  int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
				&model, &l1, &stepping, &l2);
  /* %n conversions are not counted in sscanf's return value, so MATCHES
     is 3 when stepping was given and 2 when only family/model were.
     L1/L2 record how many characters were consumed in each case, which
     lets us reject trailing junk with a pointer to where it starts.  */
  if (matches == 3)
    {
      if (strlen (args) != l2)
	error (_("Trailing junk: '%s'."), args + l2);
    }
  else if (matches == 2)
    {
      if (strlen (args) != l1)
	error (_("Trailing junk: '%s'."), args + l1);

      /* Stepping was omitted; default it to zero.  */
      stepping = 0;
    }
  else
    error (_("Bad format. See \"help set record btrace cpu\"."));

  /* Range-check against the widths the cpu struct can hold.  */
  if (USHRT_MAX < family)
    error (_("Cpu family too big."));

  if (UCHAR_MAX < model)
    error (_("Cpu model too big."));

  if (UCHAR_MAX < stepping)
    error (_("Cpu stepping too big."));

  record_btrace_cpu.vendor = CV_INTEL;
  record_btrace_cpu.family = family;
  record_btrace_cpu.model = model;
  record_btrace_cpu.stepping = stepping;

  /* Only switch state after all validation passed.  */
  record_btrace_cpu_state = CS_CPU;
}
  2344. /* The "show record btrace cpu" command. */
  2345. static void
  2346. cmd_show_record_btrace_cpu (const char *args, int from_tty)
  2347. {
  2348. if (args != nullptr && *args != 0)
  2349. error (_("Trailing junk: '%s'."), args);
  2350. switch (record_btrace_cpu_state)
  2351. {
  2352. case CS_AUTO:
  2353. gdb_printf (_("btrace cpu is 'auto'.\n"));
  2354. return;
  2355. case CS_NONE:
  2356. gdb_printf (_("btrace cpu is 'none'.\n"));
  2357. return;
  2358. case CS_CPU:
  2359. switch (record_btrace_cpu.vendor)
  2360. {
  2361. case CV_INTEL:
  2362. if (record_btrace_cpu.stepping == 0)
  2363. gdb_printf (_("btrace cpu is 'intel: %u/%u'.\n"),
  2364. record_btrace_cpu.family,
  2365. record_btrace_cpu.model);
  2366. else
  2367. gdb_printf (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
  2368. record_btrace_cpu.family,
  2369. record_btrace_cpu.model,
  2370. record_btrace_cpu.stepping);
  2371. return;
  2372. }
  2373. }
  2374. error (_("Internal error: bad cpu state."));
  2375. }
  2376. /* The "record bts buffer-size" show value function. */
  2377. static void
  2378. show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
  2379. struct cmd_list_element *c,
  2380. const char *value)
  2381. {
  2382. gdb_printf (file, _("The record/replay bts buffer size is %s.\n"),
  2383. value);
  2384. }
  2385. /* The "record pt buffer-size" show value function. */
  2386. static void
  2387. show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
  2388. struct cmd_list_element *c,
  2389. const char *value)
  2390. {
  2391. gdb_printf (file, _("The record/replay pt buffer size is %s.\n"),
  2392. value);
  2393. }
/* Initialize btrace commands.  */

void _initialize_record_btrace ();
void
_initialize_record_btrace ()
{
  /* "record btrace" (alias "record b") starts recording in the default
     format; see cmd_record_btrace_start.  */
  cmd_list_element *record_btrace_cmd
    = add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		      _("Start branch trace recording."),
		      &record_btrace_cmdlist, 0, &record_cmdlist);
  add_alias_cmd ("b", record_btrace_cmd, class_obscure, 1, &record_cmdlist);

  /* "record btrace bts" (alias "record bts").  */
  cmd_list_element *record_btrace_bts_cmd
    = add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	       _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	       &record_btrace_cmdlist);
  add_alias_cmd ("bts", record_btrace_bts_cmd, class_obscure, 1,
		 &record_cmdlist);

  /* "record btrace pt" (alias "record pt").  */
  cmd_list_element *record_btrace_pt_cmd
    = add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	       _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	       &record_btrace_cmdlist);
  add_alias_cmd ("pt", record_btrace_pt_cmd, class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_setshow_prefix_cmd ("btrace", class_support,
			  _("Set record options."),
			  _("Show record options."),
			  &set_record_btrace_cmdlist,
			  &show_record_btrace_cmdlist,
			  &set_record_cmdlist, &show_record_cmdlist);

  /* "set/show record btrace replay-memory-access".  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			_("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			NULL, cmd_show_replay_memory_access,
			&set_record_btrace_cmdlist,
			&show_record_btrace_cmdlist);

  /* "set record btrace cpu" and its "auto"/"none" sub-commands.  */
  add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
		  _("\
Set the cpu to be used for trace decode.\n\n\
The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
When decoding branch trace, enable errata workarounds for the specified cpu.\n\
The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
When GDB does not support that cpu, this option can be used to enable\n\
workarounds for a similar cpu that GDB supports.\n\n\
When set to \"none\", errata workarounds are disabled."),
		  &set_record_btrace_cpu_cmdlist,
		  1,
		  &set_record_btrace_cmdlist);
  add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
Automatically determine the cpu to be used for trace decode."),
	   &set_record_btrace_cpu_cmdlist);
  add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
Do not enable errata workarounds for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  /* "show record btrace cpu".  */
  add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
Show the cpu to be used for trace decode."),
	   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" options.  */
  add_setshow_prefix_cmd ("bts", class_support,
			  _("Set record btrace bts options."),
			  _("Show record btrace bts options."),
			  &set_record_btrace_bts_cmdlist,
			  &show_record_btrace_bts_cmdlist,
			  &set_record_btrace_cmdlist,
			  &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" options.  */
  add_setshow_prefix_cmd ("pt", class_support,
			  _("Set record btrace pt options."),
			  _("Show record btrace pt options."),
			  &set_record_btrace_pt_cmdlist,
			  &show_record_btrace_pt_cmdlist,
			  &set_record_btrace_cmdlist,
			  &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size. Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Register the target itself.  */
  add_target (record_btrace_target_info, record_btrace_target_open);

  /* Cache for per-function bound/type information.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default requested buffer sizes.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}