linux-x86-low.cc

/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
#ifdef __x86_64__
static target_desc_up tdesc_amd64_linux_no_xml;
#endif
static target_desc_up tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
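
/* 0xe9 is the jmp instruction with a 32-bit displacement; the 0x66
   operand-size prefix in SMALL_JUMP_INSN shrinks the displacement to
   16 bits, giving a 4-byte instruction.  */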

/* Backward compatibility for gdb without XML support.  */

static const char xmltarget_i386_linux_no_xml[] = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
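
/* Note that in the PTRACE_ARCH_PRCTL requests used below, the ptrace
   ADDR and DATA arguments are swapped relative to a direct arch_prctl
   call: ADDR receives the result pointer and DATA selects the
   sub-function.  E.g.

     ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS);

   stores the tracee's fs base in BASE.  */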

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (gdb::array_view<const char * const> features) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update the target description of all processes; a new GDB has
     connected, and it may or may not support XML target
     descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  21 * 8, 22 * 8,                 /* fs_base, gs_base.  */
  -1, -1, -1, -1,                 /* MPX registers BND0 ... BND3.  */
  -1, -1,                         /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1                              /* pkru  */
};
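
/* Entries of -1 mark registers that have no slot in the `struct user'
   greg buffer; x86_fill_gregset/x86_store_gregset below skip them, and
   they are transferred via the xstate regset instead.  */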

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];
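    /* DESC receives the kernel's struct user_desc viewed as four
       32-bit words; word 1 is the base_addr field, which is all we
       need here.  */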
    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);
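
    /* The low three bits of a segment selector hold the TI flag and
       RPL; shifting them off yields the GDT entry number expected by
       PTRACE_GET_THREAD_AREA.  E.g. a %gs value of 0x33 selects
       entry 6.  */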
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
collect_register_i386 (struct regcache *regcache, int regno, void *buf)
{
  collect_register (regcache, regno, buf);

#ifdef __x86_64__
  /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
     space reserved in buf for the register is 8 bytes.  Make sure the entire
     reserved space is initialized.  */

  gdb_assert (register_size (regcache->tdesc, regno) == 4);

  if (regno == RAX)
    {
      /* Sign extend EAX value to avoid potential syscall restart
	 problems.

	 See amd64_linux_collect_native_gregset() in
	 gdb/amd64-linux-nat.c for a detailed explanation.  */
      *(int64_t *) buf = *(int32_t *) buf;
    }
  else
    {
      /* Zero-extend.  */
      *(uint64_t *) buf = *(uint32_t *) buf;
    }
#endif
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* Handle ORIG_EAX, which is not in i386_regmap.  */
  collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
			 ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
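
/* 0xCC is the single-byte int3 breakpoint instruction; executing it
   leaves the PC one byte past the breakpoint address, hence the
   low_decr_pc_after_break adjustment of 1 above.  */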

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
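
/* The final initializer, sizeof (void *), gives the debug register
   length in bytes: 8 for a 64-bit gdbserver build, 4 for a 32-bit
   one.  */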

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();

  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}

/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit the hardware debug registers from the parent on
     fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirror will become zeroed
     in the end before detaching the forked-off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}

static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..511] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */

#define I386_LINUX_XSAVE_XCR0_OFFSET 464
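
/* For illustration: given an xsave buffer BUF filled in by
   PTRACE_GETREGSET with NT_X86_XSTATE, the XCR0 value could be
   extracted with

     uint64_t xcr0;
     memcpy (&xcr0, (char *) buf + I386_LINUX_XSAVE_XCR0_OFFSET, 8);

   which is what x86_linux_read_description below does via an array
   index.  */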

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml.get ();
      else
#endif
	return tdesc_i386_linux_no_xml.get ();
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support XML target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  scoped_restore_current_thread restore_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    switch_to_thread (find_any_thread_of_pid (pid));

    low_arch_setup ();
  });
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (gdb::array_view<const char * const> features)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in the qSupported query, it supports x86 XML target
     descriptions.  */
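  /* E.g. a matching feature string looks like "xmlRegisters=i386";
     the list after '=' may name several architectures, separated by
     commas, which is why it is tokenized below.  */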
  use_xml = 0;

  for (const char *feature : features)
    {
      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }

  update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}

/* Fill *SYSNO with the syscall number that the inferior was trapped
   on.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

bool
x86_target::supports_tracepoints ()
{
  return true;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
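
/* For example, push_opcode (buf, "48 89 e6") writes the three bytes
   0x48 0x89 0xe6 into BUF and returns 3.  */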

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");  /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");        /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");  /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");        /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");     /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");        /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");     /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");        /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */
  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");     /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");  /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");        /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");        /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");        /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");        /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");        /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");  /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
  1088. /* Build a jump pad that saves registers and calls a collection
  1089. function. Writes a jump instruction to the jump pad to
  1090. JJUMPAD_INSN. The caller is responsible to write it in at the
  1091. tracepoint address. */
  1092. static int
  1093. i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
  1094. CORE_ADDR collector,
  1095. CORE_ADDR lockaddr,
  1096. ULONGEST orig_size,
  1097. CORE_ADDR *jump_entry,
  1098. CORE_ADDR *trampoline,
  1099. ULONGEST *trampoline_size,
  1100. unsigned char *jjump_pad_insn,
  1101. ULONGEST *jjump_pad_insn_size,
  1102. CORE_ADDR *adjusted_insn_addr,
  1103. CORE_ADDR *adjusted_insn_addr_end,
  1104. char *err)
  1105. {
  1106. unsigned char buf[0x100];
  1107. int i, offset;
  1108. CORE_ADDR buildaddr = *jump_entry;
  1109. /* Build the jump pad. */
  1110. /* First, do tracepoint data collection. Save registers. */
  1111. i = 0;
  1112. buf[i++] = 0x60; /* pushad */
  1113. buf[i++] = 0x68; /* push tpaddr aka $pc */
  1114. *((int *)(buf + i)) = (int) tpaddr;
  1115. i += 4;
  1116. buf[i++] = 0x9c; /* pushf */
  1117. buf[i++] = 0x1e; /* push %ds */
  1118. buf[i++] = 0x06; /* push %es */
  1119. buf[i++] = 0x0f; /* push %fs */
  1120. buf[i++] = 0xa0;
  1121. buf[i++] = 0x0f; /* push %gs */
  1122. buf[i++] = 0xa8;
  1123. buf[i++] = 0x16; /* push %ss */
  1124. buf[i++] = 0x0e; /* push %cs */
  1125. append_insns (&buildaddr, i, buf);
  1126. /* Stack space for the collecting_t object. */
  1127. i = 0;
  1128. i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
  1129. /* Build the object. */
  1130. i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
  1131. memcpy (buf + i, &tpoint, 4);
  1132. i += 4;
  1133. i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
  1134. i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  1135. i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
  1136. append_insns (&buildaddr, i, buf);
  1137. /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
  1138. If we cared for it, this could be using xchg alternatively. */
  1139. i = 0;
  1140. i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  1141. i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
  1142. %esp,<lockaddr> */
  1143. memcpy (&buf[i], (void *) &lockaddr, 4);
  1144. i += 4;
  1145. i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
  1146. i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
  1147. append_insns (&buildaddr, i, buf);
  1148. /* Set up arguments to the gdb_collect call. */
  1149. i = 0;
  1150. i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
  1151. i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
  1152. i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
  1153. append_insns (&buildaddr, i, buf);
  1154. i = 0;
  1155. i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
  1156. append_insns (&buildaddr, i, buf);
  1157. i = 0;
  i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);

  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
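
/* Implementation of target ops method "supports_fast_tracepoints".  */
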
bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}
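
/* Implementation of target ops method
   "install_fast_tracepoint_jump_pad".  Dispatch to the 64-bit or
   32-bit variant according to the current target description.  */
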
int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                              CORE_ADDR tpaddr,
                                              CORE_ADDR collector,
                                              CORE_ADDR lockaddr,
                                              ULONGEST orig_size,
                                              CORE_ADDR *jump_entry,
                                              CORE_ADDR *trampoline,
                                              ULONGEST *trampoline_size,
                                              unsigned char *jjump_pad_insn,
                                              ULONGEST *jjump_pad_insn_size,
                                              CORE_ADDR *adjusted_insn_addr,
                                              CORE_ADDR *adjusted_insn_addr_end,
                                              char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to the user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}
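
/* Append LEN bytes of already-encoded instructions from START to the
   code buffer at current_insn_ptr, and advance current_insn_ptr past
   them.  */
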
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  threads_debug_printf ("Adding %d bytes of insn at %s",
                        len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ ("jmp end_" #NAME "\n" \
               "\t" "start_" #NAME ":" \
               "\t" INSNS "\n" \
               "\t" "end_" #NAME ":"); \
    } while (0)
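
/* For illustration only: EMIT_ASM (foo, "push %rax") expands roughly to

     do
       {
         extern unsigned char start_foo, end_foo;
         add_insns (&start_foo, &end_foo - &start_foo);
         __asm__ ("jmp end_foo\n"
                  "\t" "start_foo:" "\t" "push %rax" "\n"
                  "\t" "end_foo:");
       } while (0)

   The inline asm plants the encoded bytes of INSNS between the start/end
   labels inside this function's own text, jumping over them at run time;
   add_insns then copies those bytes into the code buffer being built.  */
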
#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ (".code32\n" \
               "\t" "jmp end_" #NAME "\n" \
               "\t" "start_" #NAME ":\n" \
               "\t" INSNS "\n" \
               "\t" "end_" #NAME ":\n" \
               ".code64\n"); \
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__
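
/* The emitters below compile agent expression bytecode to native AMD64
   code for a simple stack machine: the top of the stack is cached in
   %rax, and the remaining entries live on the real stack below %rsp.
   The prologue stashes the two incoming arguments (the raw register
   block in %rdi and the result pointer in %rsi) in the new frame; the
   epilogue stores the final top of stack through that result pointer
   and returns 0.  */
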
static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}
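
/* Emit a conditional jump whose 32-bit displacement is not yet known.
   *OFFSET_P is set to the offset of the displacement field within the
   emitted sequence, and *SIZE_P to its size, so that the bytecode
   compiler can patch in the destination later via the
   write_goto_address hook.  For amd64_emit_if_goto the expected bytes
   are:

     48 89 c1             mov %rax,%rcx
     58                   pop %rax
     48 83 f9 00          cmp $0x0,%rcx
     0f 85 00 00 00 00    jne <disp32>     (displacement at offset 10)

   so *OFFSET_P must be kept in sync with the sequence below.  */
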
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the IPA shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a direct call.  Use an indirect callq
         through a register instead.  Use %r10: it is call-clobbered,
         so we don't have to push/pop it.  Note the REX.B prefixes
         (0x49 and 0x41) that select %r10 rather than %rdx.  */
      buf[i++] = 0x49; /* movabs $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0x41; /* callq *%r10 */
      buf[i++] = 0xff;
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* We know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
            "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
            "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments of up to 15 words (the scaled
     displacement N * 8 must fit in a signed byte), but we don't expect
     any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
            /* Save away a copy of the stack top.  */
            "push %rax\n\t"
            /* Also pass top as the second argument.  */
            "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
            /* Restore the stack top, %rax may have been trashed.  */
            "pop %rax");
}
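
/* The following emitters generate fused compare-and-branch sequences:
   each compares the top two stack entries, pops both, and either falls
   through or takes a to-be-patched 32-bit jump.  As with the plain
   gotos, *OFFSET_P and *SIZE_P locate the jump's displacement field;
   e.g. for amd64_emit_eq_goto the 4-byte cmp, 2-byte jne, 5-byte lea
   and 1-byte pop place the 0xe9 opcode at offset 12, hence the
   displacement at offset 13.  */
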
static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
            "cmp %rax,(%rsp)\n\t"
            "jne .Lamd64_eq_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_eq_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_ne_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ne_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
            "cmp %rax,(%rsp)\n\t"
            "jnl .Lamd64_lt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_lt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
            "cmp %rax,(%rsp)\n\t"
            "jnle .Lamd64_le_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_le_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
            "cmp %rax,(%rsp)\n\t"
            "jng .Lamd64_gt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_gt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
            "cmp %rax,(%rsp)\n\t"
            "jnge .Lamd64_ge_fallthru\n\t"
            ".Lamd64_ge_jump:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ge_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
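
/* The "emit_ops" vector used by the bytecode compiler for 64-bit
   inferiors.  */
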
static emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
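
/* The i386 flavors of the emitters.  The convention here is that a
   64-bit value is held in the register pair %eax (low half) / %ebx
   (high half); the incoming arguments are found at 8(%ebp) and
   12(%ebp), per the i386 cdecl calling convention.  */
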
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp\n\t"
              "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
              "mov 12(%ebp),%ecx\n\t"
              "mov %eax,(%ecx)\n\t"
              "mov %ebx,0x4(%ecx)\n\t"
              "xor %eax,%eax\n\t"
              "pop %ebx\n\t"
              "pop %ebp\n\t"
              "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
              "add (%esp),%eax\n\t"
              "adc 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
              "subl %eax,(%esp)\n\t"
              "sbbl %ebx,4(%esp)\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  /* The 64-bit result comes back in %edx:%eax; move the high half into
     %ebx to match the stack-top convention used by these emitters.  */
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume the function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");
  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
              /* Check low half first, more likely to be decider */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");
  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnl .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
              "cmpl %ebx,4(%esp)\n\t"
              "jle .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnle .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jng .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
              "cmpl %ebx,4(%esp)\n\t"
              "jge .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnge .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");
  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
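
/* The "emit_ops" vector used by the bytecode compiler for 32-bit
   inferiors.  */
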
static emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
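
/* Implementation of target ops method "emit_ops".  */
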
emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
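
/* Implementation of linux target ops method
   "low_supports_range_stepping".  */
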
bool
x86_target::low_supports_range_stepping ()
{
  return true;
}
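
/* Implementation of target ops method "get_ipa_tdesc_idx".  */
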
int
x86_target::get_ipa_tdesc_idx ()
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml.get ())
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml.get (),
                           amd64_linux_read_description (X86_XSTATE_SSE_MASK,
                                                         false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml.get (),
                           i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}