amd64-windows-tdep.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389
  1. /* Copyright (C) 2009-2022 Free Software Foundation, Inc.
  2. This file is part of GDB.
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 3 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  13. #include "defs.h"
  14. #include "osabi.h"
  15. #include "amd64-tdep.h"
  16. #include "gdbsupport/x86-xstate.h"
  17. #include "gdbtypes.h"
  18. #include "gdbcore.h"
  19. #include "regcache.h"
  20. #include "windows-tdep.h"
  21. #include "frame.h"
  22. #include "objfiles.h"
  23. #include "frame-unwind.h"
  24. #include "coff/internal.h"
  25. #include "coff/i386.h"
  26. #include "coff/pe.h"
  27. #include "libcoff.h"
  28. #include "value.h"
  29. #include <algorithm>
/* The registers used to pass integer arguments during a function call,
   in the order in which they are consumed.  */
static int amd64_windows_dummy_call_integer_regs[] =
{
  AMD64_RCX_REGNUM,          /* %rcx */
  AMD64_RDX_REGNUM,          /* %rdx */
  AMD64_R8_REGNUM,           /* %r8 */
  AMD64_R9_REGNUM            /* %r9 */
};
/* This vector maps GDB's idea of a register's number into an offset into
   the Windows API CONTEXT structure.  The entry order follows GDB's
   amd64 register numbering (general regs, rip, eflags, segments, x87
   stack, x87 control, XMM, mxcsr).  */
static int amd64_windows_gregset_reg_offset[] =
{
  120, /* Rax */
  144, /* Rbx */
  128, /* Rcx */
  136, /* Rdx */
  168, /* Rsi */
  176, /* Rdi */
  160, /* Rbp */
  152, /* Rsp */
  184, /* R8 */
  192, /* R9 */
  200, /* R10 */
  208, /* R11 */
  216, /* R12 */
  224, /* R13 */
  232, /* R14 */
  240, /* R15 */
  248, /* Rip */
  68,  /* EFlags */
  56,  /* SegCs */
  66,  /* SegSs */
  58,  /* SegDs */
  60,  /* SegEs */
  62,  /* SegFs */
  64,  /* SegGs */
  288, /* FloatSave.FloatRegisters[0] */
  304, /* FloatSave.FloatRegisters[1] */
  320, /* FloatSave.FloatRegisters[2] */
  336, /* FloatSave.FloatRegisters[3] */
  352, /* FloatSave.FloatRegisters[4] */
  368, /* FloatSave.FloatRegisters[5] */
  384, /* FloatSave.FloatRegisters[6] */
  400, /* FloatSave.FloatRegisters[7] */
  256, /* FloatSave.ControlWord */
  258, /* FloatSave.StatusWord */
  260, /* FloatSave.TagWord */
  268, /* FloatSave.ErrorSelector */
  264, /* FloatSave.ErrorOffset */
  276, /* FloatSave.DataSelector */
  272, /* FloatSave.DataOffset */
  268, /* FloatSave.ErrorSelector */
       /* NOTE(review): offset 268 appears twice; this second entry
	  presumably fills the x87 opcode (fop) slot of GDB's register
	  layout — confirm against the CONTEXT structure.  */
  416, /* Xmm0 */
  432, /* Xmm1 */
  448, /* Xmm2 */
  464, /* Xmm3 */
  480, /* Xmm4 */
  496, /* Xmm5 */
  512, /* Xmm6 */
  528, /* Xmm7 */
  544, /* Xmm8 */
  560, /* Xmm9 */
  576, /* Xmm10 */
  592, /* Xmm11 */
  608, /* Xmm12 */
  624, /* Xmm13 */
  640, /* Xmm14 */
  656, /* Xmm15 */
  280, /* FloatSave.MxCsr */
};

/* Total size in bytes of the Windows CONTEXT structure as consumed by
   the offsets above.  */
#define AMD64_WINDOWS_SIZEOF_GREGSET 1232
  101. /* Return nonzero if an argument of type TYPE should be passed
  102. via one of the integer registers. */
  103. static int
  104. amd64_windows_passed_by_integer_register (struct type *type)
  105. {
  106. switch (type->code ())
  107. {
  108. case TYPE_CODE_INT:
  109. case TYPE_CODE_ENUM:
  110. case TYPE_CODE_BOOL:
  111. case TYPE_CODE_RANGE:
  112. case TYPE_CODE_CHAR:
  113. case TYPE_CODE_PTR:
  114. case TYPE_CODE_REF:
  115. case TYPE_CODE_RVALUE_REF:
  116. case TYPE_CODE_STRUCT:
  117. case TYPE_CODE_UNION:
  118. case TYPE_CODE_COMPLEX:
  119. return (TYPE_LENGTH (type) == 1
  120. || TYPE_LENGTH (type) == 2
  121. || TYPE_LENGTH (type) == 4
  122. || TYPE_LENGTH (type) == 8);
  123. default:
  124. return 0;
  125. }
  126. }
  127. /* Return nonzero if an argument of type TYPE should be passed
  128. via one of the XMM registers. */
  129. static int
  130. amd64_windows_passed_by_xmm_register (struct type *type)
  131. {
  132. return ((type->code () == TYPE_CODE_FLT
  133. || type->code () == TYPE_CODE_DECFLOAT)
  134. && (TYPE_LENGTH (type) == 4 || TYPE_LENGTH (type) == 8));
  135. }
  136. /* Return non-zero iff an argument of the given TYPE should be passed
  137. by pointer. */
  138. static int
  139. amd64_windows_passed_by_pointer (struct type *type)
  140. {
  141. if (amd64_windows_passed_by_integer_register (type))
  142. return 0;
  143. if (amd64_windows_passed_by_xmm_register (type))
  144. return 0;
  145. return 1;
  146. }
  147. /* For each argument that should be passed by pointer, reserve some
  148. stack space, store a copy of the argument on the stack, and replace
  149. the argument by its address. Return the new Stack Pointer value.
  150. NARGS is the number of arguments. ARGS is the array containing
  151. the value of each argument. SP is value of the Stack Pointer. */
  152. static CORE_ADDR
  153. amd64_windows_adjust_args_passed_by_pointer (struct value **args,
  154. int nargs, CORE_ADDR sp)
  155. {
  156. int i;
  157. for (i = 0; i < nargs; i++)
  158. if (amd64_windows_passed_by_pointer (value_type (args[i])))
  159. {
  160. struct type *type = value_type (args[i]);
  161. const gdb_byte *valbuf = value_contents (args[i]).data ();
  162. const int len = TYPE_LENGTH (type);
  163. /* Store a copy of that argument on the stack, aligned to
  164. a 16 bytes boundary, and then use the copy's address as
  165. the argument. */
  166. sp -= len;
  167. sp &= ~0xf;
  168. write_memory (sp, valbuf, len);
  169. args[i]
  170. = value_addr (value_from_contents_and_address (type, valbuf, sp));
  171. }
  172. return sp;
  173. }
  174. /* Store the value of ARG in register REGNO (right-justified).
  175. REGCACHE is the register cache. */
  176. static void
  177. amd64_windows_store_arg_in_reg (struct regcache *regcache,
  178. struct value *arg, int regno)
  179. {
  180. struct type *type = value_type (arg);
  181. const gdb_byte *valbuf = value_contents (arg).data ();
  182. gdb_byte buf[8];
  183. gdb_assert (TYPE_LENGTH (type) <= 8);
  184. memset (buf, 0, sizeof buf);
  185. memcpy (buf, valbuf, std::min (TYPE_LENGTH (type), (ULONGEST) 8));
  186. regcache->cooked_write (regno, buf);
  187. }
/* Push the arguments for an inferior function call, and return
   the updated value of the SP (Stack Pointer).

   All arguments are identical to the arguments used in
   amd64_windows_push_dummy_call.  */

static CORE_ADDR
amd64_windows_push_arguments (struct regcache *regcache, int nargs,
			      struct value **args, CORE_ADDR sp,
			      function_call_return_method return_method)
{
  int reg_idx = 0;
  int i;
  struct value **stack_args = XALLOCAVEC (struct value *, nargs);
  int num_stack_args = 0;	/* Number of args passed on the stack.  */
  int num_elements = 0;		/* Total stack slots (8 bytes each) needed.  */
  int element = 0;		/* Current slot index while writing.  */

  /* First, handle the arguments passed by pointer.

     These arguments are replaced by pointers to a copy we are making
     in inferior memory.  So use a copy of the ARGS table, to avoid
     modifying the original one.  */
  {
    struct value **args1 = XALLOCAVEC (struct value *, nargs);

    memcpy (args1, args, nargs * sizeof (struct value *));
    sp = amd64_windows_adjust_args_passed_by_pointer (args1, nargs, sp);
    args = args1;
  }

  /* Reserve a register for the "hidden" argument (the address of the
     caller-allocated return-value buffer).  */
  if (return_method == return_method_struct)
    reg_idx++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      int on_stack_p = 1;

      /* The first four arguments go in registers; once those are
	 consumed, everything else goes on the stack.  */
      if (reg_idx < ARRAY_SIZE (amd64_windows_dummy_call_integer_regs))
	{
	  if (amd64_windows_passed_by_integer_register (type))
	    {
	      amd64_windows_store_arg_in_reg
		(regcache, args[i],
		 amd64_windows_dummy_call_integer_regs[reg_idx]);
	      on_stack_p = 0;
	      reg_idx++;
	    }
	  else if (amd64_windows_passed_by_xmm_register (type))
	    {
	      amd64_windows_store_arg_in_reg
		(regcache, args[i], AMD64_XMM0_REGNUM + reg_idx);
	      /* In case of varargs, these parameters must also be
		 passed via the integer registers.  */
	      amd64_windows_store_arg_in_reg
		(regcache, args[i],
		 amd64_windows_dummy_call_integer_regs[reg_idx]);
	      on_stack_p = 0;
	      reg_idx++;
	    }
	}

      if (on_stack_p)
	{
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
    }

  /* Allocate space for the arguments on the stack, keeping it
     aligned on a 16 byte boundary.  */
  sp -= num_elements * 8;
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]).data ();

      write_memory (sp + element * 8, valbuf, TYPE_LENGTH (type));
      element += ((TYPE_LENGTH (type) + 7) / 8);
    }

  return sp;
}
/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
amd64_windows_push_dummy_call
  (struct gdbarch *gdbarch, struct value *function,
   struct regcache *regcache, CORE_ADDR bp_addr,
   int nargs, struct value **args, CORE_ADDR sp,
   function_call_return_method return_method, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* Pass arguments.  */
  sp = amd64_windows_push_arguments (regcache, nargs, args, sp,
				     return_method);

  /* Pass the "hidden" argument.  */
  if (return_method == return_method_struct)
    {
      /* The "hidden" argument is passed through the first argument
	 register.  */
      const int arg_regnum = amd64_windows_dummy_call_integer_regs[0];

      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache->cooked_write (arg_regnum, buf);
    }

  /* Reserve some memory on the stack for the integer-parameter
     registers, as required by the ABI (the 32-byte "shadow space").  */
  sp -= ARRAY_SIZE (amd64_windows_dummy_call_integer_regs) * 8;

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache->cooked_write (AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache->cooked_write (AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
/* Implement the "return_value" gdbarch method for amd64-windows.  */

static enum return_value_convention
amd64_windows_return_value (struct gdbarch *gdbarch, struct value *function,
			    struct type *type, struct regcache *regcache,
			    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  int len = TYPE_LENGTH (type);
  int regnum = -1;

  /* See if our value is returned through a register.  If it is, then
     store the associated register number in REGNUM.  */
  switch (type->code ())
    {
      case TYPE_CODE_FLT:
	/* floats, and doubles are returned via XMM0.  */
	if (len == 4 || len == 8)
	  regnum = AMD64_XMM0_REGNUM;
	break;
      case TYPE_CODE_ARRAY:
	/* __m128, __m128i and __m128d are returned via XMM0.  */
	if (type->is_vector () && len == 16)
	  {
	    enum type_code code = TYPE_TARGET_TYPE (type)->code ();
	    if (code == TYPE_CODE_INT || code == TYPE_CODE_FLT)
	      {
		regnum = AMD64_XMM0_REGNUM;
		break;
	      }
	  }
	/* fall through */
      default:
	/* All other values that are 1, 2, 4 or 8 bytes long are returned
	   via RAX.  */
	if (len == 1 || len == 2 || len == 4 || len == 8)
	  regnum = AMD64_RAX_REGNUM;
	else if (len == 16 && type->code () == TYPE_CODE_INT)
	  regnum = AMD64_XMM0_REGNUM;
	break;
    }

  if (regnum < 0)
    {
      /* RAX contains the address where the return value has been stored.  */
      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}
      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }
  else
    {
      /* Extract the return value from the register where it was stored.  */
      if (readbuf)
	regcache->raw_read_part (regnum, 0, len, readbuf);
      if (writebuf)
	regcache->raw_write_part (regnum, 0, len, writebuf);
      return RETURN_VALUE_REGISTER_CONVENTION;
    }
}
  359. /* Check that the code pointed to by PC corresponds to a call to
  360. __main, skip it if so. Return PC otherwise. */
  361. static CORE_ADDR
  362. amd64_skip_main_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  363. {
  364. enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  365. gdb_byte op;
  366. target_read_memory (pc, &op, 1);
  367. if (op == 0xe8)
  368. {
  369. gdb_byte buf[4];
  370. if (target_read_memory (pc + 1, buf, sizeof buf) == 0)
  371. {
  372. struct bound_minimal_symbol s;
  373. CORE_ADDR call_dest;
  374. call_dest = pc + 5 + extract_signed_integer (buf, 4, byte_order);
  375. s = lookup_minimal_symbol_by_pc (call_dest);
  376. if (s.minsym != NULL
  377. && s.minsym->linkage_name () != NULL
  378. && strcmp (s.minsym->linkage_name (), "__main") == 0)
  379. pc += 5;
  380. }
  381. }
  382. return pc;
  383. }
/* Per-frame unwind state for the amd64-windows unwinders.  */

struct amd64_windows_frame_cache
{
  /* ImageBase for the module.  */
  CORE_ADDR image_base;

  /* Function start and end rva (relative virtual addresses, i.e.
     offsets from IMAGE_BASE).  */
  CORE_ADDR start_rva;
  CORE_ADDR end_rva;

  /* Next instruction to be executed.  */
  CORE_ADDR pc;

  /* Current sp.  */
  CORE_ADDR sp;

  /* Address of saved integer and xmm registers (0 when not saved).  */
  CORE_ADDR prev_reg_addr[16];
  CORE_ADDR prev_xmm_addr[16];

  /* These two next fields are set only for machine info frames.  */

  /* Likewise for RIP.  */
  CORE_ADDR prev_rip_addr;

  /* Likewise for RSP.  */
  CORE_ADDR prev_rsp_addr;

  /* Address of the previous frame.  */
  CORE_ADDR prev_sp;
};
/* Convert a Windows register number (as used in the unwind opcodes
   and REX-encoded pop instructions, i.e. the hardware encoding
   rax,rcx,rdx,rbx,rsp,rbp,rsi,rdi,r8..r15) to gdb.  */
static const enum amd64_regnum amd64_windows_w2gdb_regnum[] =
{
  AMD64_RAX_REGNUM,
  AMD64_RCX_REGNUM,
  AMD64_RDX_REGNUM,
  AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM,
  AMD64_RBP_REGNUM,
  AMD64_RSI_REGNUM,
  AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM,
  AMD64_R9_REGNUM,
  AMD64_R10_REGNUM,
  AMD64_R11_REGNUM,
  AMD64_R12_REGNUM,
  AMD64_R13_REGNUM,
  AMD64_R14_REGNUM,
  AMD64_R15_REGNUM
};
  426. /* Return TRUE iff PC is the range of the function corresponding to
  427. CACHE. */
  428. static int
  429. pc_in_range (CORE_ADDR pc, const struct amd64_windows_frame_cache *cache)
  430. {
  431. return (pc >= cache->image_base + cache->start_rva
  432. && pc < cache->image_base + cache->end_rva);
  433. }
  434. /* Try to recognize and decode an epilogue sequence.
  435. Return -1 if we fail to read the instructions for any reason.
  436. Return 1 if an epilogue sequence was recognized, 0 otherwise. */
  437. static int
  438. amd64_windows_frame_decode_epilogue (struct frame_info *this_frame,
  439. struct amd64_windows_frame_cache *cache)
  440. {
  441. /* According to MSDN an epilogue "must consist of either an add RSP,constant
  442. or lea RSP,constant[FPReg], followed by a series of zero or more 8-byte
  443. register pops and a return or a jmp".
  444. Furthermore, according to RtlVirtualUnwind, the complete list of
  445. epilog marker is:
  446. - ret [c3]
  447. - ret n [c2 imm16]
  448. - rep ret [f3 c3]
  449. - jmp imm8 | imm32 [eb rel8] or [e9 rel32]
  450. - jmp qword ptr imm32 - not handled
  451. - rex.w jmp reg [4X ff eY]
  452. */
  453. CORE_ADDR pc = cache->pc;
  454. CORE_ADDR cur_sp = cache->sp;
  455. struct gdbarch *gdbarch = get_frame_arch (this_frame);
  456. enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  457. gdb_byte op;
  458. gdb_byte rex;
  459. /* We don't care about the instruction deallocating the frame:
  460. if it hasn't been executed, the pc is still in the body,
  461. if it has been executed, the following epilog decoding will work. */
  462. /* First decode:
  463. - pop reg [41 58-5f] or [58-5f]. */
  464. while (1)
  465. {
  466. /* Read opcode. */
  467. if (target_read_memory (pc, &op, 1) != 0)
  468. return -1;
  469. if (op >= 0x40 && op <= 0x4f)
  470. {
  471. /* REX prefix. */
  472. rex = op;
  473. /* Read opcode. */
  474. if (target_read_memory (pc + 1, &op, 1) != 0)
  475. return -1;
  476. }
  477. else
  478. rex = 0;
  479. if (op >= 0x58 && op <= 0x5f)
  480. {
  481. /* pop reg */
  482. gdb_byte reg = (op & 0x0f) | ((rex & 1) << 3);
  483. cache->prev_reg_addr[amd64_windows_w2gdb_regnum[reg]] = cur_sp;
  484. cur_sp += 8;
  485. pc += rex ? 2 : 1;
  486. }
  487. else
  488. break;
  489. /* Allow the user to break this loop. This shouldn't happen as the
  490. number of consecutive pop should be small. */
  491. QUIT;
  492. }
  493. /* Then decode the marker. */
  494. /* Read opcode. */
  495. if (target_read_memory (pc, &op, 1) != 0)
  496. return -1;
  497. switch (op)
  498. {
  499. case 0xc3:
  500. /* Ret. */
  501. cache->prev_rip_addr = cur_sp;
  502. cache->prev_sp = cur_sp + 8;
  503. return 1;
  504. case 0xeb:
  505. {
  506. /* jmp rel8 */
  507. gdb_byte rel8;
  508. CORE_ADDR npc;
  509. if (target_read_memory (pc + 1, &rel8, 1) != 0)
  510. return -1;
  511. npc = pc + 2 + (signed char) rel8;
  512. /* If the jump is within the function, then this is not a marker,
  513. otherwise this is a tail-call. */
  514. return !pc_in_range (npc, cache);
  515. }
  516. case 0xec:
  517. {
  518. /* jmp rel32 */
  519. gdb_byte rel32[4];
  520. CORE_ADDR npc;
  521. if (target_read_memory (pc + 1, rel32, 4) != 0)
  522. return -1;
  523. npc = pc + 5 + extract_signed_integer (rel32, 4, byte_order);
  524. /* If the jump is within the function, then this is not a marker,
  525. otherwise this is a tail-call. */
  526. return !pc_in_range (npc, cache);
  527. }
  528. case 0xc2:
  529. {
  530. /* ret n */
  531. gdb_byte imm16[2];
  532. if (target_read_memory (pc + 1, imm16, 2) != 0)
  533. return -1;
  534. cache->prev_rip_addr = cur_sp;
  535. cache->prev_sp = cur_sp
  536. + extract_unsigned_integer (imm16, 4, byte_order);
  537. return 1;
  538. }
  539. case 0xf3:
  540. {
  541. /* rep; ret */
  542. gdb_byte op1;
  543. if (target_read_memory (pc + 2, &op1, 1) != 0)
  544. return -1;
  545. if (op1 != 0xc3)
  546. return 0;
  547. cache->prev_rip_addr = cur_sp;
  548. cache->prev_sp = cur_sp + 8;
  549. return 1;
  550. }
  551. case 0x40:
  552. case 0x41:
  553. case 0x42:
  554. case 0x43:
  555. case 0x44:
  556. case 0x45:
  557. case 0x46:
  558. case 0x47:
  559. case 0x48:
  560. case 0x49:
  561. case 0x4a:
  562. case 0x4b:
  563. case 0x4c:
  564. case 0x4d:
  565. case 0x4e:
  566. case 0x4f:
  567. /* Got a REX prefix, read next byte. */
  568. rex = op;
  569. if (target_read_memory (pc + 1, &op, 1) != 0)
  570. return -1;
  571. if (op == 0xff)
  572. {
  573. /* rex jmp reg */
  574. gdb_byte op1;
  575. if (target_read_memory (pc + 2, &op1, 1) != 0)
  576. return -1;
  577. return (op1 & 0xf8) == 0xe0;
  578. }
  579. else
  580. return 0;
  581. default:
  582. /* Not REX, so unknown. */
  583. return 0;
  584. }
  585. }
  586. /* Decode and execute unwind insns at UNWIND_INFO. */
  587. static void
  588. amd64_windows_frame_decode_insns (struct frame_info *this_frame,
  589. struct amd64_windows_frame_cache *cache,
  590. CORE_ADDR unwind_info)
  591. {
  592. CORE_ADDR save_addr = 0;
  593. CORE_ADDR cur_sp = cache->sp;
  594. struct gdbarch *gdbarch = get_frame_arch (this_frame);
  595. enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  596. int first = 1;
  597. /* There are at least 3 possibilities to share an unwind info entry:
  598. 1. Two different runtime_function entries (in .pdata) can point to the
  599. same unwind info entry. There is no such indication while unwinding,
  600. so we don't really care about that case. We suppose this scheme is
  601. used to save memory when the unwind entries are exactly the same.
  602. 2. Chained unwind_info entries, with no unwind codes (no prologue).
  603. There is a major difference with the previous case: the pc range for
  604. the function is different (in case 1, the pc range comes from the
  605. runtime_function entry; in case 2, the pc range for the chained entry
  606. comes from the first unwind entry). Case 1 cannot be used instead as
  607. the pc is not in the prologue. This case is officially documented.
  608. (There might be unwind code in the first unwind entry to handle
  609. additional unwinding). GCC (at least until gcc 5.0) doesn't chain
  610. entries.
  611. 3. Undocumented unwind info redirection. Hard to know the exact purpose,
  612. so it is considered as a memory optimization of case 2.
  613. */
  614. if (unwind_info & 1)
  615. {
  616. /* Unofficially documented unwind info redirection, when UNWIND_INFO
  617. address is odd (http://www.codemachine.com/article_x64deepdive.html).
  618. */
  619. struct external_pex64_runtime_function d;
  620. if (target_read_memory (cache->image_base + (unwind_info & ~1),
  621. (gdb_byte *) &d, sizeof (d)) != 0)
  622. return;
  623. cache->start_rva
  624. = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
  625. cache->end_rva
  626. = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
  627. unwind_info
  628. = extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
  629. }
  630. while (1)
  631. {
  632. struct external_pex64_unwind_info ex_ui;
  633. /* There are at most 256 16-bit unwind insns. */
  634. gdb_byte insns[2 * 256];
  635. gdb_byte *p;
  636. gdb_byte *end_insns;
  637. unsigned char codes_count;
  638. unsigned char frame_reg;
  639. CORE_ADDR start;
  640. /* Read and decode header. */
  641. if (target_read_memory (cache->image_base + unwind_info,
  642. (gdb_byte *) &ex_ui, sizeof (ex_ui)) != 0)
  643. return;
  644. frame_debug_printf ("%s: ver: %02x, plgsz: %02x, cnt: %02x, frame: %02x",
  645. paddress (gdbarch, unwind_info),
  646. ex_ui.Version_Flags, ex_ui.SizeOfPrologue,
  647. ex_ui.CountOfCodes, ex_ui.FrameRegisterOffset);
  648. /* Check version. */
  649. if (PEX64_UWI_VERSION (ex_ui.Version_Flags) != 1
  650. && PEX64_UWI_VERSION (ex_ui.Version_Flags) != 2)
  651. return;
  652. start = cache->image_base + cache->start_rva;
  653. if (first
  654. && !(cache->pc >= start && cache->pc < start + ex_ui.SizeOfPrologue))
  655. {
  656. /* We want to detect if the PC points to an epilogue. This needs
  657. to be checked only once, and an epilogue can be anywhere but in
  658. the prologue. If so, the epilogue detection+decoding function is
  659. sufficient. Otherwise, the unwinder will consider that the PC
  660. is in the body of the function and will need to decode unwind
  661. info. */
  662. if (amd64_windows_frame_decode_epilogue (this_frame, cache) == 1)
  663. return;
  664. /* Not in an epilog. Clear possible side effects. */
  665. memset (cache->prev_reg_addr, 0, sizeof (cache->prev_reg_addr));
  666. }
  667. codes_count = ex_ui.CountOfCodes;
  668. frame_reg = PEX64_UWI_FRAMEREG (ex_ui.FrameRegisterOffset);
  669. if (frame_reg != 0)
  670. {
  671. /* According to msdn:
  672. If an FP reg is used, then any unwind code taking an offset must
  673. only be used after the FP reg is established in the prolog. */
  674. gdb_byte buf[8];
  675. int frreg = amd64_windows_w2gdb_regnum[frame_reg];
  676. get_frame_register (this_frame, frreg, buf);
  677. save_addr = extract_unsigned_integer (buf, 8, byte_order);
  678. frame_debug_printf (" frame_reg=%s, val=%s",
  679. gdbarch_register_name (gdbarch, frreg),
  680. paddress (gdbarch, save_addr));
  681. }
  682. /* Read opcodes. */
  683. if (codes_count != 0
  684. && target_read_memory (cache->image_base + unwind_info
  685. + sizeof (ex_ui),
  686. insns, codes_count * 2) != 0)
  687. return;
  688. end_insns = &insns[codes_count * 2];
  689. p = insns;
  690. /* Skip opcodes 6 of version 2. This opcode is not documented. */
  691. if (PEX64_UWI_VERSION (ex_ui.Version_Flags) == 2)
  692. {
  693. for (; p < end_insns; p += 2)
  694. if (PEX64_UNWCODE_CODE (p[1]) != 6)
  695. break;
  696. }
  697. for (; p < end_insns; p += 2)
  698. {
  699. int reg;
  700. /* Virtually execute the operation if the pc is after the
  701. corresponding instruction (that does matter in case of break
  702. within the prologue). Note that for chained info (!first), the
  703. prologue has been fully executed. */
  704. if (cache->pc >= start + p[0] || cache->pc < start)
  705. {
  706. frame_debug_printf (" op #%u: off=0x%02x, insn=0x%02x",
  707. (unsigned) (p - insns), p[0], p[1]);
  708. /* If there is no frame registers defined, the current value of
  709. rsp is used instead. */
  710. if (frame_reg == 0)
  711. save_addr = cur_sp;
  712. reg = -1;
  713. switch (PEX64_UNWCODE_CODE (p[1]))
  714. {
  715. case UWOP_PUSH_NONVOL:
  716. /* Push pre-decrements RSP. */
  717. reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
  718. cache->prev_reg_addr[reg] = cur_sp;
  719. cur_sp += 8;
  720. break;
  721. case UWOP_ALLOC_LARGE:
  722. if (PEX64_UNWCODE_INFO (p[1]) == 0)
  723. cur_sp +=
  724. 8 * extract_unsigned_integer (p + 2, 2, byte_order);
  725. else if (PEX64_UNWCODE_INFO (p[1]) == 1)
  726. cur_sp += extract_unsigned_integer (p + 2, 4, byte_order);
  727. else
  728. return;
  729. break;
  730. case UWOP_ALLOC_SMALL:
  731. cur_sp += 8 + 8 * PEX64_UNWCODE_INFO (p[1]);
  732. break;
  733. case UWOP_SET_FPREG:
  734. cur_sp = save_addr
  735. - PEX64_UWI_FRAMEOFF (ex_ui.FrameRegisterOffset) * 16;
  736. break;
  737. case UWOP_SAVE_NONVOL:
  738. reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
  739. cache->prev_reg_addr[reg] = save_addr
  740. + 8 * extract_unsigned_integer (p + 2, 2, byte_order);
  741. break;
  742. case UWOP_SAVE_NONVOL_FAR:
  743. reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
  744. cache->prev_reg_addr[reg] = save_addr
  745. + 8 * extract_unsigned_integer (p + 2, 4, byte_order);
  746. break;
  747. case UWOP_SAVE_XMM128:
  748. cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
  749. save_addr
  750. - 16 * extract_unsigned_integer (p + 2, 2, byte_order);
  751. break;
  752. case UWOP_SAVE_XMM128_FAR:
  753. cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
  754. save_addr
  755. - 16 * extract_unsigned_integer (p + 2, 4, byte_order);
  756. break;
  757. case UWOP_PUSH_MACHFRAME:
  758. if (PEX64_UNWCODE_INFO (p[1]) == 0)
  759. {
  760. cache->prev_rip_addr = cur_sp + 0;
  761. cache->prev_rsp_addr = cur_sp + 24;
  762. cur_sp += 40;
  763. }
  764. else if (PEX64_UNWCODE_INFO (p[1]) == 1)
  765. {
  766. cache->prev_rip_addr = cur_sp + 8;
  767. cache->prev_rsp_addr = cur_sp + 32;
  768. cur_sp += 48;
  769. }
  770. else
  771. return;
  772. break;
  773. default:
  774. return;
  775. }
  776. /* Display address where the register was saved. */
  777. if (reg >= 0)
  778. frame_debug_printf (" [reg %s at %s]",
  779. gdbarch_register_name (gdbarch, reg),
  780. paddress (gdbarch,
  781. cache->prev_reg_addr[reg]));
  782. }
  783. /* Adjust with the length of the opcode. */
  784. switch (PEX64_UNWCODE_CODE (p[1]))
  785. {
  786. case UWOP_PUSH_NONVOL:
  787. case UWOP_ALLOC_SMALL:
  788. case UWOP_SET_FPREG:
  789. case UWOP_PUSH_MACHFRAME:
  790. break;
  791. case UWOP_ALLOC_LARGE:
  792. if (PEX64_UNWCODE_INFO (p[1]) == 0)
  793. p += 2;
  794. else if (PEX64_UNWCODE_INFO (p[1]) == 1)
  795. p += 4;
  796. else
  797. return;
  798. break;
  799. case UWOP_SAVE_NONVOL:
  800. case UWOP_SAVE_XMM128:
  801. p += 2;
  802. break;
  803. case UWOP_SAVE_NONVOL_FAR:
  804. case UWOP_SAVE_XMM128_FAR:
  805. p += 4;
  806. break;
  807. default:
  808. return;
  809. }
  810. }
  811. if (PEX64_UWI_FLAGS (ex_ui.Version_Flags) != UNW_FLAG_CHAININFO)
  812. {
  813. /* End of unwind info. */
  814. break;
  815. }
  816. else
  817. {
  818. /* Read the chained unwind info. */
  819. struct external_pex64_runtime_function d;
  820. CORE_ADDR chain_vma;
  821. /* Not anymore the first entry. */
  822. first = 0;
  823. /* Stay aligned on word boundary. */
  824. chain_vma = cache->image_base + unwind_info
  825. + sizeof (ex_ui) + ((codes_count + 1) & ~1) * 2;
  826. if (target_read_memory (chain_vma, (gdb_byte *) &d, sizeof (d)) != 0)
  827. return;
  828. /* Decode begin/end. This may be different from .pdata index, as
  829. an unwind info may be shared by several functions (in particular
  830. if many functions have the same prolog and handler. */
  831. cache->start_rva =
  832. extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
  833. cache->end_rva =
  834. extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
  835. unwind_info =
  836. extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
  837. frame_debug_printf ("next in chain: unwind_data=%s, start_rva=%s, "
  838. "end_rva=%s",
  839. paddress (gdbarch, unwind_info),
  840. paddress (gdbarch, cache->start_rva),
  841. paddress (gdbarch, cache->end_rva));
  842. }
  843. /* Allow the user to break this loop. */
  844. QUIT;
  845. }
  846. /* PC is saved by the call. */
  847. if (cache->prev_rip_addr == 0)
  848. cache->prev_rip_addr = cur_sp;
  849. cache->prev_sp = cur_sp + 8;
  850. frame_debug_printf (" prev_sp: %s, prev_pc @%s",
  851. paddress (gdbarch, cache->prev_sp),
  852. paddress (gdbarch, cache->prev_rip_addr));
  853. }
  854. /* Find SEH unwind info for PC, returning 0 on success.
  855. UNWIND_INFO is set to the rva of unwind info address, IMAGE_BASE
  856. to the base address of the corresponding image, and START_RVA
  857. to the rva of the function containing PC. */
  858. static int
  859. amd64_windows_find_unwind_info (struct gdbarch *gdbarch, CORE_ADDR pc,
  860. CORE_ADDR *unwind_info,
  861. CORE_ADDR *image_base,
  862. CORE_ADDR *start_rva,
  863. CORE_ADDR *end_rva)
  864. {
  865. struct obj_section *sec;
  866. pe_data_type *pe;
  867. IMAGE_DATA_DIRECTORY *dir;
  868. struct objfile *objfile;
  869. unsigned long lo, hi;
  870. CORE_ADDR base;
  871. enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  872. /* Get the corresponding exception directory. */
  873. sec = find_pc_section (pc);
  874. if (sec == NULL)
  875. return -1;
  876. objfile = sec->objfile;
  877. pe = pe_data (sec->objfile->obfd);
  878. dir = &pe->pe_opthdr.DataDirectory[PE_EXCEPTION_TABLE];
  879. base = pe->pe_opthdr.ImageBase + objfile->text_section_offset ();
  880. *image_base = base;
  881. /* Find the entry.
  882. Note: This does not handle dynamically added entries (for JIT
  883. engines). For this, we would need to ask the kernel directly,
  884. which means getting some info from the native layer. For the
  885. rest of the code, however, it's probably faster to search
  886. the entry ourselves. */
  887. lo = 0;
  888. hi = dir->Size / sizeof (struct external_pex64_runtime_function);
  889. *unwind_info = 0;
  890. while (lo <= hi)
  891. {
  892. unsigned long mid = lo + (hi - lo) / 2;
  893. struct external_pex64_runtime_function d;
  894. CORE_ADDR sa, ea;
  895. if (target_read_memory (base + dir->VirtualAddress + mid * sizeof (d),
  896. (gdb_byte *) &d, sizeof (d)) != 0)
  897. return -1;
  898. sa = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
  899. ea = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
  900. if (pc < base + sa)
  901. hi = mid - 1;
  902. else if (pc >= base + ea)
  903. lo = mid + 1;
  904. else if (pc >= base + sa && pc < base + ea)
  905. {
  906. /* Got it. */
  907. *start_rva = sa;
  908. *end_rva = ea;
  909. *unwind_info =
  910. extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
  911. break;
  912. }
  913. else
  914. break;
  915. }
  916. frame_debug_printf ("image_base=%s, unwind_data=%s",
  917. paddress (gdbarch, base),
  918. paddress (gdbarch, *unwind_info));
  919. return 0;
  920. }
  921. /* Fill THIS_CACHE using the native amd64-windows unwinding data
  922. for THIS_FRAME. */
  923. static struct amd64_windows_frame_cache *
  924. amd64_windows_frame_cache (struct frame_info *this_frame, void **this_cache)
  925. {
  926. struct gdbarch *gdbarch = get_frame_arch (this_frame);
  927. enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  928. struct amd64_windows_frame_cache *cache;
  929. gdb_byte buf[8];
  930. CORE_ADDR pc;
  931. CORE_ADDR unwind_info = 0;
  932. if (*this_cache)
  933. return (struct amd64_windows_frame_cache *) *this_cache;
  934. cache = FRAME_OBSTACK_ZALLOC (struct amd64_windows_frame_cache);
  935. *this_cache = cache;
  936. /* Get current PC and SP. */
  937. pc = get_frame_pc (this_frame);
  938. get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  939. cache->sp = extract_unsigned_integer (buf, 8, byte_order);
  940. cache->pc = pc;
  941. if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
  942. &cache->image_base,
  943. &cache->start_rva,
  944. &cache->end_rva))
  945. return cache;
  946. if (unwind_info == 0)
  947. {
  948. /* Assume a leaf function. */
  949. cache->prev_sp = cache->sp + 8;
  950. cache->prev_rip_addr = cache->sp;
  951. }
  952. else
  953. {
  954. /* Decode unwind insns to compute saved addresses. */
  955. amd64_windows_frame_decode_insns (this_frame, cache, unwind_info);
  956. }
  957. return cache;
  958. }
  959. /* Implement the "prev_register" method of struct frame_unwind
  960. using the standard Windows x64 SEH info. */
  961. static struct value *
  962. amd64_windows_frame_prev_register (struct frame_info *this_frame,
  963. void **this_cache, int regnum)
  964. {
  965. struct gdbarch *gdbarch = get_frame_arch (this_frame);
  966. struct amd64_windows_frame_cache *cache =
  967. amd64_windows_frame_cache (this_frame, this_cache);
  968. CORE_ADDR prev;
  969. frame_debug_printf ("%s for sp=%s",
  970. gdbarch_register_name (gdbarch, regnum),
  971. paddress (gdbarch, cache->prev_sp));
  972. if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
  973. prev = cache->prev_xmm_addr[regnum - AMD64_XMM0_REGNUM];
  974. else if (regnum == AMD64_RSP_REGNUM)
  975. {
  976. prev = cache->prev_rsp_addr;
  977. if (prev == 0)
  978. return frame_unwind_got_constant (this_frame, regnum, cache->prev_sp);
  979. }
  980. else if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_R15_REGNUM)
  981. prev = cache->prev_reg_addr[regnum - AMD64_RAX_REGNUM];
  982. else if (regnum == AMD64_RIP_REGNUM)
  983. prev = cache->prev_rip_addr;
  984. else
  985. prev = 0;
  986. if (prev != 0)
  987. frame_debug_printf (" -> at %s", paddress (gdbarch, prev));
  988. if (prev)
  989. {
  990. /* Register was saved. */
  991. return frame_unwind_got_memory (this_frame, regnum, prev);
  992. }
  993. else
  994. {
  995. /* Register is either volatile or not modified. */
  996. return frame_unwind_got_register (this_frame, regnum, regnum);
  997. }
  998. }
  999. /* Implement the "this_id" method of struct frame_unwind using
  1000. the standard Windows x64 SEH info. */
  1001. static void
  1002. amd64_windows_frame_this_id (struct frame_info *this_frame, void **this_cache,
  1003. struct frame_id *this_id)
  1004. {
  1005. struct amd64_windows_frame_cache *cache =
  1006. amd64_windows_frame_cache (this_frame, this_cache);
  1007. *this_id = frame_id_build (cache->prev_sp,
  1008. cache->image_base + cache->start_rva);
  1009. }
/* Windows x64 SEH unwinder.  */

static const struct frame_unwind amd64_windows_frame_unwind =
{
  /* Fields are, in order: unwinder name, frame type, stop-reason /
     this-id / prev-register callbacks, private data, and sniffer --
     see frame-unwind.h for the exact layout.  */
  "amd64 windows",
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  &amd64_windows_frame_this_id,
  &amd64_windows_frame_prev_register,
  NULL,
  default_frame_sniffer
};
  1021. /* Implement the "skip_prologue" gdbarch method. */
  1022. static CORE_ADDR
  1023. amd64_windows_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  1024. {
  1025. CORE_ADDR func_addr;
  1026. CORE_ADDR unwind_info = 0;
  1027. CORE_ADDR image_base, start_rva, end_rva;
  1028. struct external_pex64_unwind_info ex_ui;
  1029. /* Use prologue size from unwind info. */
  1030. if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
  1031. &image_base, &start_rva, &end_rva) == 0)
  1032. {
  1033. if (unwind_info == 0)
  1034. {
  1035. /* Leaf function. */
  1036. return pc;
  1037. }
  1038. else if (target_read_memory (image_base + unwind_info,
  1039. (gdb_byte *) &ex_ui, sizeof (ex_ui)) == 0
  1040. && PEX64_UWI_VERSION (ex_ui.Version_Flags) == 1)
  1041. return std::max (pc, image_base + start_rva + ex_ui.SizeOfPrologue);
  1042. }
  1043. /* See if we can determine the end of the prologue via the symbol
  1044. table. If so, then return either the PC, or the PC after
  1045. the prologue, whichever is greater. */
  1046. if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
  1047. {
  1048. CORE_ADDR post_prologue_pc
  1049. = skip_prologue_using_sal (gdbarch, func_addr);
  1050. if (post_prologue_pc != 0)
  1051. return std::max (pc, post_prologue_pc);
  1052. }
  1053. return pc;
  1054. }
  1055. /* Check Win64 DLL jmp trampolines and find jump destination. */
  1056. static CORE_ADDR
  1057. amd64_windows_skip_trampoline_code (struct frame_info *frame, CORE_ADDR pc)
  1058. {
  1059. CORE_ADDR destination = 0;
  1060. struct gdbarch *gdbarch = get_frame_arch (frame);
  1061. enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  1062. /* Check for jmp *<offset>(%rip) (jump near, absolute indirect (/4)). */
  1063. if (pc && read_memory_unsigned_integer (pc, 2, byte_order) == 0x25ff)
  1064. {
  1065. /* Get opcode offset and see if we can find a reference in our data. */
  1066. ULONGEST offset
  1067. = read_memory_unsigned_integer (pc + 2, 4, byte_order);
  1068. /* Get address of function pointer at end of pc. */
  1069. CORE_ADDR indirect_addr = pc + offset + 6;
  1070. struct minimal_symbol *indsym
  1071. = (indirect_addr
  1072. ? lookup_minimal_symbol_by_pc (indirect_addr).minsym
  1073. : NULL);
  1074. const char *symname = indsym ? indsym->linkage_name () : NULL;
  1075. if (symname)
  1076. {
  1077. if (startswith (symname, "__imp_")
  1078. || startswith (symname, "_imp_"))
  1079. destination
  1080. = read_memory_unsigned_integer (indirect_addr, 8, byte_order);
  1081. }
  1082. }
  1083. return destination;
  1084. }
  1085. /* Implement the "auto_wide_charset" gdbarch method. */
  1086. static const char *
  1087. amd64_windows_auto_wide_charset (void)
  1088. {
  1089. return "UTF-16";
  1090. }
/* Common parts for gdbarch initialization for Windows and Cygwin on AMD64.

   NOTE: the statement order below is significant -- the SEH unwinder
   must be appended before amd64_init_abi runs (see comment below).  */

static void
amd64_windows_init_abi_common (gdbarch_info info, struct gdbarch *gdbarch)
{
  i386_gdbarch_tdep *tdep = (i386_gdbarch_tdep *) gdbarch_tdep (gdbarch);

  /* The dwarf2 unwinder (appended very early by i386_gdbarch_init) is
     preferred over the SEH one.  The reasons are:
     - binaries without SEH but with dwarf2 debug info are correctly handled
       (although they aren't ABI compliant, gcc before 4.7 didn't emit SEH
       info).
     - dwarf3 DW_OP_call_frame_cfa is correctly handled (it can only be
       handled if the dwarf2 unwinder is used).

     The call to amd64_init_abi appends default unwinders, that aren't
     compatible with the SEH one.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_windows_frame_unwind);

  amd64_init_abi (info, gdbarch,
		  amd64_target_description (X86_XSTATE_SSE_MASK, false));

  /* Function calls.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_windows_push_dummy_call);
  set_gdbarch_return_value (gdbarch, amd64_windows_return_value);
  set_gdbarch_skip_main_prologue (gdbarch, amd64_skip_main_prologue);
  set_gdbarch_skip_trampoline_code (gdbarch,
				    amd64_windows_skip_trampoline_code);

  set_gdbarch_skip_prologue (gdbarch, amd64_windows_skip_prologue);

  /* General-register set layout used by core files; no separate FP
     register set (sizeof_fpregset is 0).  */
  tdep->gregset_reg_offset = amd64_windows_gregset_reg_offset;
  tdep->gregset_num_regs = ARRAY_SIZE (amd64_windows_gregset_reg_offset);
  tdep->sizeof_gregset = AMD64_WINDOWS_SIZEOF_GREGSET;
  tdep->sizeof_fpregset = 0;

  /* Core file support.  */
  set_gdbarch_core_xfer_shared_libraries
    (gdbarch, windows_core_xfer_shared_libraries);
  set_gdbarch_core_pid_to_str (gdbarch, windows_core_pid_to_str);

  /* Wide character strings are decoded as UTF-16.  */
  set_gdbarch_auto_wide_charset (gdbarch, amd64_windows_auto_wide_charset);
}
/* gdbarch initialization for Windows on AMD64.  */

static void
amd64_windows_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  /* Shared Windows/Cygwin setup (unwinder, calling convention, core
     file support), then the Windows-specific parts.  */
  amd64_windows_init_abi_common (info, gdbarch);
  windows_init_abi (info, gdbarch);

  /* On Windows, "long"s are only 32bit.  */
  set_gdbarch_long_bit (gdbarch, 32);
}
/* gdbarch initialization for Cygwin on AMD64.  */

static void
amd64_cygwin_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  /* Shared Windows/Cygwin setup, then the Cygwin-specific parts.
     Unlike the native Windows variant above, "long" is left at its
     default width here.  */
  amd64_windows_init_abi_common (info, gdbarch);
  cygwin_init_abi (info, gdbarch);
}
  1142. static gdb_osabi
  1143. amd64_windows_osabi_sniffer (bfd *abfd)
  1144. {
  1145. const char *target_name = bfd_get_target (abfd);
  1146. if (!streq (target_name, "pei-x86-64"))
  1147. return GDB_OSABI_UNKNOWN;
  1148. if (is_linked_with_cygwin_dll (abfd))
  1149. return GDB_OSABI_CYGWIN;
  1150. return GDB_OSABI_WINDOWS;
  1151. }
  1152. static enum gdb_osabi
  1153. amd64_cygwin_core_osabi_sniffer (bfd *abfd)
  1154. {
  1155. const char *target_name = bfd_get_target (abfd);
  1156. /* Cygwin uses elf core dumps. Do not claim all ELF executables,
  1157. check whether there is a .reg section of proper size. */
  1158. if (strcmp (target_name, "elf64-x86-64") == 0)
  1159. {
  1160. asection *section = bfd_get_section_by_name (abfd, ".reg");
  1161. if (section != nullptr
  1162. && bfd_section_size (section) == AMD64_WINDOWS_SIZEOF_GREGSET)
  1163. return GDB_OSABI_CYGWIN;
  1164. }
  1165. return GDB_OSABI_UNKNOWN;
  1166. }
void _initialize_amd64_windows_tdep ();
void
_initialize_amd64_windows_tdep ()
{
  /* Register the ABI initializers for both OS ABIs this file
     supports.  */
  gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_WINDOWS,
			  amd64_windows_init_abi);
  gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_CYGWIN,
			  amd64_cygwin_init_abi);

  /* PE executables are detected through the COFF flavour.  */
  gdbarch_register_osabi_sniffer (bfd_arch_i386, bfd_target_coff_flavour,
				  amd64_windows_osabi_sniffer);

  /* Cygwin uses elf core dumps.  */
  gdbarch_register_osabi_sniffer (bfd_arch_i386, bfd_target_elf_flavour,
				  amd64_cygwin_core_osabi_sniffer);
}