linux-aarch64-low.cc
/* GNU/Linux/AArch64 specific low level interface, for the remote server
   for GDB.

   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "arch/aarch64-mte-linux.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-mte-linux-ptrace.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

#ifdef HAVE_GETAUXVAL
#include <sys/auxv.h>
#endif
/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_memory_tagging () override;

  bool fetch_memtags (CORE_ADDR address, size_t len,
		      gdb::byte_vector &tags, int type) override;

  bool store_memtags (CORE_ADDR address, size_t len,
		      const gdb::byte_vector &tags, int type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
};
/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

/* Fill BUF with the MTE registers from the regcache.  */

static void
aarch64_fill_mteregset (struct regcache *regcache, void *buf)
{
  uint64_t *mte_regset = (uint64_t *) buf;
  int mte_base = find_regno (regcache->tdesc, "tag_ctl");

  collect_register (regcache, mte_base, mte_regset);
}

/* Store the MTE registers to regcache.  */

static void
aarch64_store_mteregset (struct regcache *regcache, const void *buf)
{
  uint64_t *mte_regset = (uint64_t *) buf;
  int mte_base = find_regno (regcache->tdesc, "tag_ctl");

  /* Tag Control register.  */
  supply_register (regcache, mte_base, mte_regset);
}
bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
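/* The bytes above are the little-endian byte order of the 32-bit
   instruction word 0xd4200000, i.e. "BRK #0".  */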
/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
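/* For reference (remote serial protocol, not specific to this file): the
   packet types above correspond to "Z0" (software breakpoint), "Z1"
   (hardware breakpoint), "Z2" (write watchpoint), "Z3" (read watchpoint)
   and "Z4" (access watchpoint) in GDB's remote protocol, so this target
   advertises support for all five.  */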
/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */,
					 current_lwp_ptid (), state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */,
				       current_lwp_ptid (), state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len,
				     0 /* is_insert */,
				     current_lwp_ptid (), state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */,
				       current_lwp_ptid (), state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
/* Return the address with only its significant bits kept.  This is used
   to ignore the top byte (TBI).  */

static CORE_ADDR
address_significant (CORE_ADDR addr)
{
  /* Clear the insignificant bits of the target address and sign extend
     the resulting address.  */
  int addr_bit = 56;

  CORE_ADDR sign = (CORE_ADDR) 1 << (addr_bit - 1);
  addr &= ((CORE_ADDR) 1 << addr_bit) - 1;
  addr = (addr ^ sign) - sign;

  return addr;
}
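/* Worked example (illustrative values, not from the source): a user-space
   pointer carrying tag 0xf0 in its top byte, 0xf0000000f7654321, becomes
   0x00000000f7654321 (bit 55 is clear, so the sign extension is a no-op),
   while a tagged kernel-style address 0xf0ff123456789abc becomes
   0xffff123456789abc once bit 55 is propagated through the top byte.  */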
/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Make sure to ignore the top byte, otherwise we may not recognize a
     hardware watchpoint hit.  The stopped data addresses coming from the
     kernel can potentially be tagged addresses.  */
  const CORE_ADDR addr_trap
    = address_significant ((CORE_ADDR) siginfo.si_addr);

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
				   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}
/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}
/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}
/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}
/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will end up zeroed
     before the forked off process is detached, making this compatible
     with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
/* Array containing all the possible register sets for AArch64/Linux.  During
   architecture setup, these will be checked against the HWCAP/HWCAP2 bits for
   validity and enabled/disabled accordingly.

   Their sizes are set to 0 here, but they will be adjusted later depending
   on whether each register set is available or not.  */

static struct regset_info aarch64_regsets[] =
{
  /* GPR registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    0, GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  /* Floating Point (FPU) registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    0, FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  /* Scalable Vector Extension (SVE) registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    0, EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  /* PAC registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    0, OPTIONAL_REGS,
    nullptr, aarch64_store_pauthregset },
  /* Tagged address control / MTE registers.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_TAGGED_ADDR_CTRL,
    0, OPTIONAL_REGS,
    aarch64_fill_mteregset, aarch64_store_mteregset },
  NULL_REGSET
};
static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    nullptr, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    nullptr, /* regset_bitmap */
    nullptr, /* usrregs */
    &aarch64_regsets_info,
  };
/* Given FEATURES, adjust the available register sets by setting their
   sizes.  A size of 0 means the register set is disabled and won't be
   used.  */

static void
aarch64_adjust_register_sets (const struct aarch64_features &features)
{
  struct regset_info *regset;

  for (regset = aarch64_regsets; regset->size >= 0; regset++)
    {
      switch (regset->nt_type)
	{
	case NT_PRSTATUS:
	  /* General purpose registers are always present.  */
	  regset->size = sizeof (struct user_pt_regs);
	  break;
	case NT_FPREGSET:
	  /* This is unavailable when SVE is present.  */
	  if (!features.sve)
	    regset->size = sizeof (struct user_fpsimd_state);
	  break;
	case NT_ARM_SVE:
	  if (features.sve)
	    regset->size = SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE);
	  break;
	case NT_ARM_PAC_MASK:
	  if (features.pauth)
	    regset->size = AARCH64_PAUTH_REGS_SIZE;
	  break;
	case NT_ARM_TAGGED_ADDR_CTRL:
	  if (features.mte)
	    regset->size = AARCH64_LINUX_SIZEOF_MTE;
	  break;
	default:
	  gdb_assert_not_reached ("Unknown register set found.");
	}
    }
}
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      struct aarch64_features features;

      uint64_t vq = aarch64_sve_get_vq (tid);
      features.sve = (vq > 0);
      /* A-profile PAC is 64-bit only.  */
      features.pauth = linux_get_hwcap (8) & AARCH64_HWCAP_PACA;
      /* A-profile MTE is 64-bit only.  */
      features.mte = linux_get_hwcap2 (8) & HWCAP2_MTE;

      current_process ()->tdesc
	= aarch64_linux_read_description (vq, features.pauth, features.mte);

      /* Adjust the register sets we should use for this particular set of
	 features.  */
      aarch64_adjust_register_sets (features);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  /* AArch64 64-bit registers.  */
  return &regs_info_aarch64;
}

/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}
/* Implementation of linux target ops method "low_get_thread_area".  */

int
aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
bool
aarch64_target::low_supports_catch_syscall ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_syscall_trapinfo".  */

void
aarch64_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically construct a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
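/* For reference, the three memory operand kinds above map to the usual
   AArch64 assembler addressing modes (illustrative):

     offset_memory_operand (16)     ->  ldr x0, [sp, #16]
     preindex_memory_operand (16)   ->  ldr x0, [sp, #16]!   (base updated)
     postindex_memory_operand (16)  ->  ldr x0, [sp], #16    (base updated)  */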
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
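/* A reading of the column layout above (our annotation, not from the
   original source): each enumerator packs the o0 bit (bit 14), op1
   (3 bits), CRn (4 bits), CRm (4 bits) and op2 (3 bits) fields of the
   MRS/MSR system register encoding into one 15-bit value, suitable for
   shifting into bits 5..19 of the instruction word.  */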
/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

     RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
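/* A typical use of the pair emitters below (illustrative, in the spirit of
   how a jump pad saves a register pair): push FP and LR with a pre-indexed
   store so the base register is updated in the same instruction:

     p += emit_stp (p, fp, lr, sp, preindex_memory_operand (-16));
     (emits: stp x29, x30, [sp, #-16]!)

   Note the 7-bit immediate field is scaled by 8 (operand.index >> 3
   above), so offsets must be multiples of 8 within -512..504.  */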
/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!
     STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register; it indicates whether the store succeeded
   or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
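/* Together, LDAXR and STXR implement the classic load-linked /
   store-conditional pattern.  A sketch of how a caller could combine the
   emitters above into an atomic compare-and-swap loop (illustrative only;
   the branch emitters are not shown in this excerpt):

     loop:
       ldaxr  x1, [x2]        ; emit_ldaxr (p, x1, x2)
       cmp    x1, x0          ; emit_cmp (p, x1, register_operand (x0))
       b.ne   done
       stxr   w3, x4, [x2]    ; emit_stxr (p, w3, x4, x2)
       cbnz   w3, loop        ; retry if the exclusive store failed
     done:  */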
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}
/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
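
/* Worked example (editorial, not in the original source): for
   ADDR == 0x7f2a40 this emits

     MOV  xd, #0x2a40
     MOVK xd, #0x7f, lsl #16

   and returns 2, since bits [63:32] of ADDR are zero and the remaining
   MOVK instructions are skipped.  */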

/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write an LSLV instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write an LSRV instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write an MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special-purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write an MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special-purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.
   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.
   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   sign extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
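
/* Editorial example (not in the original source): emit_sbfx (buf, x0,
   x0, 0, 8) expands to SBFM x0, x0, #0, #7, which sign-extends the low
   byte of x0 in place, the same effect as the SXTB alias.  */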

/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
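
/* Editorial example (not in the original source): emit_ubfx (buf, x0,
   x0, 0, 16) expands to UBFM x0, x0, #0, #15, which zero-extends the
   low 16 bits of x0, the same effect as the UXTH alias.  This is
   exactly how aarch64_emit_zero_ext below truncates the top of the
   stack to ARG bits.  */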

/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction writes rn to rd if the condition is true, and
   rm + 1 otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination
   register.  1 is written if the condition is true.  This is an alias
   for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order
     to invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
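
/* Editorial note (not in the original source): AArch64 condition codes
   come in complementary pairs that differ only in bit 0 (e.g. EQ/NE,
   GE/LT), so XOR-ing the encoding with 1 inverts the condition.  For
   instance, CSET x0, EQ is emitted as CSINC x0, xzr, xzr, NE: x0
   becomes xzr + 1 == 1 when EQ holds, and xzr == 0 otherwise.  */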

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);

#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, storing information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           B.COND TAKEN    ; If cond is true, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:
       */
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64),
                                       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           CBZ xn, TAKEN   ; If xn == 0, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:
       */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
           B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:
       */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]
   */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};

bool
aarch64_target::supports_fast_tracepoints ()
{
  return true;
}

/* Implementation of target ops method
   "install_fast_tracepoint_jump_pad".  */

int
aarch64_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we
     build here.

     For simplicity, we set the size of each cell on the stack to 16
     bytes.  This way one cell can hold any register type, from system
     registers to the 128 bit SIMD&FP registers.  Furthermore, the stack
     pointer has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      .  32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      .  31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          |  5 cells
          | FPSR                                                 |
          | FPCR                                                 | <- SP + 16
          *------------- collecting_t object --------------------*
          | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
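
  /* Editorial note (arithmetic not spelled out in the original source):
     the frame built below is (32 + 31 + 5 + 1) cells * 16 bytes
     == 1104 bytes, and every adjustment of sp is a multiple of 16, so
     the 16-byte stack alignment required by the ABI is preserved
     throughout.  */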

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]
   */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not
     need to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]
   */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)
   */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]
   */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]
   */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]
   */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!
   */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again
   */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));
  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0
   */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));
  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are
       ; done before this instruction is executed.  Furthermore, this
       ; instruction will trigger an event, letting other threads know
       ; they can grab the lock.
       STLR xzr, [x0]
   */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16
   */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)
   */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)
   */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)
   */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  threads_debug_printf ("Adding %d instructions at %s",
                        len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
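
/* Editorial note (addresses derived from the layout above, not original
   text): with FP anchored at sp + 2 * 8, the saved x0 (the regs
   pointer) lives at FP - 2 * 8 and the saved x1 (the value pointer) at
   FP - 1 * 8; this is exactly where aarch64_emit_reg and
   aarch64_emit_epilogue below reload them from.  */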

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
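
/* Editorial example (not in the original source): if x0 holds 5, the
   CMP clears the Z flag and CSET x0, EQ writes 0; if x0 holds 0, Z is
   set and CSET writes 1 -- i.e. the logical negation of the top of the
   stack.  */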

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
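
/* Editorial note (behavior inferred from the two methods above, not
   original text): the goto emitters record the offset of a NOP
   placeholder; once the jump target is known, write_goto_address
   overwrites that single instruction in inferior memory with an
   unconditional B to the target.  */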

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Set up arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};

/* Implementation of target ops method "emit_ops".  */

emit_ops *
aarch64_target::emit_ops ()
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of target ops method
   "get_min_fast_tracepoint_insn_len".  */

int
aarch64_target::get_min_fast_tracepoint_insn_len ()
{
  return 4;
}

/* Implementation of linux target ops method
   "low_supports_range_stepping".  */

bool
aarch64_target::low_supports_range_stepping ()
{
  return true;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Returns true if memory tagging is supported.  */

bool
aarch64_target::supports_memory_tagging ()
{
  if (current_thread == NULL)
    {
      /* We don't have any processes running, so don't attempt to
         use linux_get_hwcap2 as it will try to fetch the current
         thread id.  Instead, just fetch the auxv from the self
         PID.  */
#ifdef HAVE_GETAUXVAL
      return (getauxval (AT_HWCAP2) & HWCAP2_MTE) != 0;
#else
      return true;
#endif
    }

  return (linux_get_hwcap2 (8) & HWCAP2_MTE) != 0;
}

bool
aarch64_target::fetch_memtags (CORE_ADDR address, size_t len,
                               gdb::byte_vector &tags, int type)
{
  /* Allocation tags are per-process, so any tid is fine.  */
  int tid = lwpid_of (current_thread);

  /* Allocation tag?  */
  if (type == static_cast <int> (aarch64_memtag_type::mte_allocation))
    return aarch64_mte_fetch_memtags (tid, address, len, tags);

  return false;
}

bool
aarch64_target::store_memtags (CORE_ADDR address, size_t len,
                               const gdb::byte_vector &tags, int type)
{
  /* Allocation tags are per-process, so any tid is fine.  */
  int tid = lwpid_of (current_thread);

  /* Allocation tag?  */
  if (type == static_cast <int> (aarch64_memtag_type::mte_allocation))
    return aarch64_mte_store_memtags (tid, address, len, tags);

  return false;
}

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
}