elf32-rx.c

/* Renesas RX specific support for 32-bit ELF.
   Copyright (C) 2008-2022 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#include "sysdep.h"
#include "bfd.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/rx.h"
#include "libiberty.h"
#include "elf32-rx.h"

#define RX_OPCODE_BIG_ENDIAN 0

/* This is a meta-target that's used only with objcopy, to avoid the
   endian-swap we would otherwise get.  We check for this in
   rx_elf_object_p().  */
const bfd_target rx_elf32_be_ns_vec;
const bfd_target rx_elf32_be_vec;

#ifdef DEBUG
char * rx_get_reloc (long);
void rx_dump_symtab (bfd *, void *, void *);
#endif

#define RXREL(n,sz,bit,shift,complain,pcrel) \
  HOWTO (R_RX_##n, shift, sz, bit, pcrel, 0, complain_overflow_ ## complain, \
         bfd_elf_generic_reloc, "R_RX_" #n, false, 0, ~0, false)
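
/* Illustrative note (added; not part of the original source): with the
   RXREL macro above, an entry such as

     RXREL (DIR32, 2, 32, 0, signed, false)

   expands roughly to

     HOWTO (R_RX_DIR32, 0, 2, 32, false, 0, complain_overflow_signed,
            bfd_elf_generic_reloc, "R_RX_DIR32", false, 0, ~0, false)

   i.e. a 32-bit, non-PC-relative howto named "R_RX_DIR32" whose value
   is checked for signed overflow.  */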

/* Note that the relocations around 0x7f are internal to this file;
   feel free to move them as needed to avoid conflicts with published
   relocation numbers.  */

static reloc_howto_type rx_elf_howto_table [] =
{
  RXREL (NONE, 3, 0, 0, dont, false),
  RXREL (DIR32, 2, 32, 0, signed, false),
  RXREL (DIR24S, 2, 24, 0, signed, false),
  RXREL (DIR16, 1, 16, 0, dont, false),
  RXREL (DIR16U, 1, 16, 0, unsigned, false),
  RXREL (DIR16S, 1, 16, 0, signed, false),
  RXREL (DIR8, 0, 8, 0, dont, false),
  RXREL (DIR8U, 0, 8, 0, unsigned, false),
  RXREL (DIR8S, 0, 8, 0, signed, false),
  RXREL (DIR24S_PCREL, 2, 24, 0, signed, true),
  RXREL (DIR16S_PCREL, 1, 16, 0, signed, true),
  RXREL (DIR8S_PCREL, 0, 8, 0, signed, true),
  RXREL (DIR16UL, 1, 16, 2, unsigned, false),
  RXREL (DIR16UW, 1, 16, 1, unsigned, false),
  RXREL (DIR8UL, 0, 8, 2, unsigned, false),
  RXREL (DIR8UW, 0, 8, 1, unsigned, false),
  RXREL (DIR32_REV, 1, 16, 0, dont, false),
  RXREL (DIR16_REV, 1, 16, 0, dont, false),
  RXREL (DIR3U_PCREL, 0, 3, 0, dont, true),

  EMPTY_HOWTO (0x13),
  EMPTY_HOWTO (0x14),
  EMPTY_HOWTO (0x15),
  EMPTY_HOWTO (0x16),
  EMPTY_HOWTO (0x17),
  EMPTY_HOWTO (0x18),
  EMPTY_HOWTO (0x19),
  EMPTY_HOWTO (0x1a),
  EMPTY_HOWTO (0x1b),
  EMPTY_HOWTO (0x1c),
  EMPTY_HOWTO (0x1d),
  EMPTY_HOWTO (0x1e),
  EMPTY_HOWTO (0x1f),

  RXREL (RH_3_PCREL, 0, 3, 0, signed, true),
  RXREL (RH_16_OP, 1, 16, 0, signed, false),
  RXREL (RH_24_OP, 2, 24, 0, signed, false),
  RXREL (RH_32_OP, 2, 32, 0, signed, false),
  RXREL (RH_24_UNS, 2, 24, 0, unsigned, false),
  RXREL (RH_8_NEG, 0, 8, 0, signed, false),
  RXREL (RH_16_NEG, 1, 16, 0, signed, false),
  RXREL (RH_24_NEG, 2, 24, 0, signed, false),
  RXREL (RH_32_NEG, 2, 32, 0, signed, false),
  RXREL (RH_DIFF, 2, 32, 0, signed, false),
  RXREL (RH_GPRELB, 1, 16, 0, unsigned, false),
  RXREL (RH_GPRELW, 1, 16, 0, unsigned, false),
  RXREL (RH_GPRELL, 1, 16, 0, unsigned, false),
  RXREL (RH_RELAX, 0, 0, 0, dont, false),

  EMPTY_HOWTO (0x2e),
  EMPTY_HOWTO (0x2f),
  EMPTY_HOWTO (0x30),
  EMPTY_HOWTO (0x31),
  EMPTY_HOWTO (0x32),
  EMPTY_HOWTO (0x33),
  EMPTY_HOWTO (0x34),
  EMPTY_HOWTO (0x35),
  EMPTY_HOWTO (0x36),
  EMPTY_HOWTO (0x37),
  EMPTY_HOWTO (0x38),
  EMPTY_HOWTO (0x39),
  EMPTY_HOWTO (0x3a),
  EMPTY_HOWTO (0x3b),
  EMPTY_HOWTO (0x3c),
  EMPTY_HOWTO (0x3d),
  EMPTY_HOWTO (0x3e),
  EMPTY_HOWTO (0x3f),
  EMPTY_HOWTO (0x40),

  RXREL (ABS32, 2, 32, 0, dont, false),
  RXREL (ABS24S, 2, 24, 0, signed, false),
  RXREL (ABS16, 1, 16, 0, dont, false),
  RXREL (ABS16U, 1, 16, 0, unsigned, false),
  RXREL (ABS16S, 1, 16, 0, signed, false),
  RXREL (ABS8, 0, 8, 0, dont, false),
  RXREL (ABS8U, 0, 8, 0, unsigned, false),
  RXREL (ABS8S, 0, 8, 0, signed, false),
  RXREL (ABS24S_PCREL, 2, 24, 0, signed, true),
  RXREL (ABS16S_PCREL, 1, 16, 0, signed, true),
  RXREL (ABS8S_PCREL, 0, 8, 0, signed, true),
  RXREL (ABS16UL, 1, 16, 0, unsigned, false),
  RXREL (ABS16UW, 1, 16, 0, unsigned, false),
  RXREL (ABS8UL, 0, 8, 0, unsigned, false),
  RXREL (ABS8UW, 0, 8, 0, unsigned, false),
  RXREL (ABS32_REV, 2, 32, 0, dont, false),
  RXREL (ABS16_REV, 1, 16, 0, dont, false),

#define STACK_REL_P(x) ((x) <= R_RX_ABS16_REV && (x) >= R_RX_ABS32)

  EMPTY_HOWTO (0x52),
  EMPTY_HOWTO (0x53),
  EMPTY_HOWTO (0x54),
  EMPTY_HOWTO (0x55),
  EMPTY_HOWTO (0x56),
  EMPTY_HOWTO (0x57),
  EMPTY_HOWTO (0x58),
  EMPTY_HOWTO (0x59),
  EMPTY_HOWTO (0x5a),
  EMPTY_HOWTO (0x5b),
  EMPTY_HOWTO (0x5c),
  EMPTY_HOWTO (0x5d),
  EMPTY_HOWTO (0x5e),
  EMPTY_HOWTO (0x5f),
  EMPTY_HOWTO (0x60),
  EMPTY_HOWTO (0x61),
  EMPTY_HOWTO (0x62),
  EMPTY_HOWTO (0x63),
  EMPTY_HOWTO (0x64),
  EMPTY_HOWTO (0x65),
  EMPTY_HOWTO (0x66),
  EMPTY_HOWTO (0x67),
  EMPTY_HOWTO (0x68),
  EMPTY_HOWTO (0x69),
  EMPTY_HOWTO (0x6a),
  EMPTY_HOWTO (0x6b),
  EMPTY_HOWTO (0x6c),
  EMPTY_HOWTO (0x6d),
  EMPTY_HOWTO (0x6e),
  EMPTY_HOWTO (0x6f),
  EMPTY_HOWTO (0x70),
  EMPTY_HOWTO (0x71),
  EMPTY_HOWTO (0x72),
  EMPTY_HOWTO (0x73),
  EMPTY_HOWTO (0x74),
  EMPTY_HOWTO (0x75),
  EMPTY_HOWTO (0x76),
  EMPTY_HOWTO (0x77),

  /* These are internal.  */
  /* A 5-bit unsigned displacement to a B/W/L address, at bit position 8/12.  */
  /* ---- ---- 4--- 3210.  */
#define R_RX_RH_ABS5p8B 0x78
  RXREL (RH_ABS5p8B, 0, 0, 0, dont, false),
#define R_RX_RH_ABS5p8W 0x79
  RXREL (RH_ABS5p8W, 0, 0, 0, dont, false),
#define R_RX_RH_ABS5p8L 0x7a
  RXREL (RH_ABS5p8L, 0, 0, 0, dont, false),
  /* A 5-bit unsigned displacement to a B/W/L address, at bit position 5/12.  */
  /* ---- -432 1--- 0---.  */
#define R_RX_RH_ABS5p5B 0x7b
  RXREL (RH_ABS5p5B, 0, 0, 0, dont, false),
#define R_RX_RH_ABS5p5W 0x7c
  RXREL (RH_ABS5p5W, 0, 0, 0, dont, false),
#define R_RX_RH_ABS5p5L 0x7d
  RXREL (RH_ABS5p5L, 0, 0, 0, dont, false),
  /* A 4-bit unsigned immediate at bit position 8.  */
#define R_RX_RH_UIMM4p8 0x7e
  RXREL (RH_UIMM4p8, 0, 0, 0, dont, false),
  /* A 4-bit negative unsigned immediate at bit position 8.  */
#define R_RX_RH_UNEG4p8 0x7f
  RXREL (RH_UNEG4p8, 0, 0, 0, dont, false),
  /* End of internal relocs.  */

  RXREL (SYM, 2, 32, 0, dont, false),
  RXREL (OPneg, 2, 32, 0, dont, false),
  RXREL (OPadd, 2, 32, 0, dont, false),
  RXREL (OPsub, 2, 32, 0, dont, false),
  RXREL (OPmul, 2, 32, 0, dont, false),
  RXREL (OPdiv, 2, 32, 0, dont, false),
  RXREL (OPshla, 2, 32, 0, dont, false),
  RXREL (OPshra, 2, 32, 0, dont, false),
  RXREL (OPsctsize, 2, 32, 0, dont, false),
  EMPTY_HOWTO (0x89),
  EMPTY_HOWTO (0x8a),
  EMPTY_HOWTO (0x8b),
  EMPTY_HOWTO (0x8c),
  RXREL (OPscttop, 2, 32, 0, dont, false),
  EMPTY_HOWTO (0x8e),
  EMPTY_HOWTO (0x8f),
  RXREL (OPand, 2, 32, 0, dont, false),
  RXREL (OPor, 2, 32, 0, dont, false),
  RXREL (OPxor, 2, 32, 0, dont, false),
  RXREL (OPnot, 2, 32, 0, dont, false),
  RXREL (OPmod, 2, 32, 0, dont, false),
  RXREL (OPromtop, 2, 32, 0, dont, false),
  RXREL (OPramtop, 2, 32, 0, dont, false)
};

/* Map BFD reloc types to RX ELF reloc types.  */

struct rx_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;
  unsigned int rx_reloc_val;
};

static const struct rx_reloc_map rx_reloc_map [] =
{
  { BFD_RELOC_NONE, R_RX_NONE },
  { BFD_RELOC_8, R_RX_DIR8S },
  { BFD_RELOC_16, R_RX_DIR16S },
  { BFD_RELOC_24, R_RX_DIR24S },
  { BFD_RELOC_32, R_RX_DIR32 },
  { BFD_RELOC_RX_16_OP, R_RX_DIR16 },
  { BFD_RELOC_RX_DIR3U_PCREL, R_RX_DIR3U_PCREL },
  { BFD_RELOC_8_PCREL, R_RX_DIR8S_PCREL },
  { BFD_RELOC_16_PCREL, R_RX_DIR16S_PCREL },
  { BFD_RELOC_24_PCREL, R_RX_DIR24S_PCREL },
  { BFD_RELOC_RX_8U, R_RX_DIR8U },
  { BFD_RELOC_RX_16U, R_RX_DIR16U },
  { BFD_RELOC_RX_24U, R_RX_RH_24_UNS },
  { BFD_RELOC_RX_NEG8, R_RX_RH_8_NEG },
  { BFD_RELOC_RX_NEG16, R_RX_RH_16_NEG },
  { BFD_RELOC_RX_NEG24, R_RX_RH_24_NEG },
  { BFD_RELOC_RX_NEG32, R_RX_RH_32_NEG },
  { BFD_RELOC_RX_DIFF, R_RX_RH_DIFF },
  { BFD_RELOC_RX_GPRELB, R_RX_RH_GPRELB },
  { BFD_RELOC_RX_GPRELW, R_RX_RH_GPRELW },
  { BFD_RELOC_RX_GPRELL, R_RX_RH_GPRELL },
  { BFD_RELOC_RX_RELAX, R_RX_RH_RELAX },
  { BFD_RELOC_RX_SYM, R_RX_SYM },
  { BFD_RELOC_RX_OP_SUBTRACT, R_RX_OPsub },
  { BFD_RELOC_RX_OP_NEG, R_RX_OPneg },
  { BFD_RELOC_RX_ABS8, R_RX_ABS8 },
  { BFD_RELOC_RX_ABS16, R_RX_ABS16 },
  { BFD_RELOC_RX_ABS16_REV, R_RX_ABS16_REV },
  { BFD_RELOC_RX_ABS32, R_RX_ABS32 },
  { BFD_RELOC_RX_ABS32_REV, R_RX_ABS32_REV },
  { BFD_RELOC_RX_ABS16UL, R_RX_ABS16UL },
  { BFD_RELOC_RX_ABS16UW, R_RX_ABS16UW },
  { BFD_RELOC_RX_ABS16U, R_RX_ABS16U }
};

#define BIGE(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)

static reloc_howto_type *
rx_reloc_type_lookup (bfd * abfd ATTRIBUTE_UNUSED,
                      bfd_reloc_code_real_type code)
{
  unsigned int i;

  if (code == BFD_RELOC_RX_32_OP)
    return rx_elf_howto_table + R_RX_DIR32;

  for (i = ARRAY_SIZE (rx_reloc_map); i--;)
    if (rx_reloc_map [i].bfd_reloc_val == code)
      return rx_elf_howto_table + rx_reloc_map[i].rx_reloc_val;

  return NULL;
}
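
/* Usage sketch (added for illustration; not part of the original
   source): generic BFD code asks this backend for a howto by BFD reloc
   code, e.g.

     reloc_howto_type *h = rx_reloc_type_lookup (abfd, BFD_RELOC_32);

   which, via the mapping table above, yields
   &rx_elf_howto_table[R_RX_DIR32]; codes with no mapping return NULL so
   the caller can report an unsupported relocation.  */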

static reloc_howto_type *
rx_reloc_name_lookup (bfd * abfd ATTRIBUTE_UNUSED, const char * r_name)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (rx_elf_howto_table); i++)
    if (rx_elf_howto_table[i].name != NULL
        && strcasecmp (rx_elf_howto_table[i].name, r_name) == 0)
      return rx_elf_howto_table + i;

  return NULL;
}

/* Set the howto pointer for an RX ELF reloc.  */

static bool
rx_info_to_howto_rela (bfd * abfd,
                       arelent * cache_ptr,
                       Elf_Internal_Rela * dst)
{
  unsigned int r_type;

  r_type = ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (R_RX_max == ARRAY_SIZE (rx_elf_howto_table));
  if (r_type >= ARRAY_SIZE (rx_elf_howto_table))
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
                          abfd, r_type);
      bfd_set_error (bfd_error_bad_value);
      return false;
    }
  cache_ptr->howto = rx_elf_howto_table + r_type;
  if (cache_ptr->howto->name == NULL)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
                          abfd, r_type);
      bfd_set_error (bfd_error_bad_value);
      return false;
    }
  return true;
}

static bfd_vma
get_symbol_value (const char * name,
                  struct bfd_link_info * info,
                  bfd * input_bfd,
                  asection * input_section,
                  int offset)
{
  bfd_vma value = 0;
  struct bfd_link_hash_entry * h;

  h = bfd_link_hash_lookup (info->hash, name, false, false, true);

  if (h == NULL
      || (h->type != bfd_link_hash_defined
          && h->type != bfd_link_hash_defweak))
    (*info->callbacks->undefined_symbol)
      (info, name, input_bfd, input_section, offset, true);
  else
    value = (h->u.def.value
             + h->u.def.section->output_section->vma
             + h->u.def.section->output_offset);

  return value;
}

static bfd_vma
get_symbol_value_maybe (const char * name,
                        struct bfd_link_info * info)
{
  bfd_vma value = 0;
  struct bfd_link_hash_entry * h;

  h = bfd_link_hash_lookup (info->hash, name, false, false, true);

  if (h == NULL
      || (h->type != bfd_link_hash_defined
          && h->type != bfd_link_hash_defweak))
    return 0;
  else
    value = (h->u.def.value
             + h->u.def.section->output_section->vma
             + h->u.def.section->output_offset);

  return value;
}

static bfd_vma
get_gp (struct bfd_link_info * info,
        bfd * abfd,
        asection * sec,
        int offset)
{
  static bool cached = false;
  static bfd_vma cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("__gp", info, abfd, sec, offset);
      cached = true;
    }
  return cached_value;
}

static bfd_vma
get_romstart (struct bfd_link_info * info,
              bfd * abfd,
              asection * sec,
              int offset)
{
  static bool cached = false;
  static bfd_vma cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("_start", info, abfd, sec, offset);
      cached = true;
    }
  return cached_value;
}

static bfd_vma
get_ramstart (struct bfd_link_info * info,
              bfd * abfd,
              asection * sec,
              int offset)
{
  static bool cached = false;
  static bfd_vma cached_value = 0;

  if (!cached)
    {
      cached_value = get_symbol_value ("__datastart", info, abfd, sec, offset);
      cached = true;
    }
  return cached_value;
}

#define NUM_STACK_ENTRIES 16
static int32_t rx_stack [ NUM_STACK_ENTRIES ];
static unsigned int rx_stack_top;

#define RX_STACK_PUSH(val) \
  do \
    { \
      if (rx_stack_top < NUM_STACK_ENTRIES) \
        rx_stack [rx_stack_top ++] = (val); \
      else \
        r = bfd_reloc_dangerous; \
    } \
  while (0)

#define RX_STACK_POP(dest) \
  do \
    { \
      if (rx_stack_top > 0) \
        (dest) = rx_stack [-- rx_stack_top]; \
      else \
        (dest) = 0, r = bfd_reloc_dangerous; \
    } \
  while (0)
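
/* Illustration (added; a sketch of how the complex reloc sequences
   handled below use this stack): to store "sym_a - sym_b" into a 32-bit
   field, the input object typically carries, at the same r_offset,

     R_RX_SYM (sym_a)   - push the address of sym_a
     R_RX_SYM (sym_b)   - push the address of sym_b
     R_RX_OPsub         - pop twice, push sym_a - sym_b
     R_RX_ABS32         - pop the result and patch four bytes

   Overflow or underflow of the 16-entry stack is reported as
   bfd_reloc_dangerous.  */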

/* Relocate an RX ELF section.

   There is some attempt to make this function usable for many architectures,
   both USE_REL and USE_RELA ['twould be nice if such a critter existed],
   if only to serve as a learning tool.

   The RELOCATE_SECTION function is called by the new ELF backend linker
   to handle the relocations for a section.

   The relocs are always passed as Rela structures; if the section
   actually uses Rel structures, the r_addend field will always be
   zero.

   This function is responsible for adjusting the section contents as
   necessary, and (if using Rela relocs and generating a relocatable
   output file) adjusting the reloc addend as necessary.

   This function does not have to worry about setting the reloc
   address or the reloc symbol index.

   LOCAL_SYMS is a pointer to the swapped in local symbols.

   LOCAL_SECTIONS is an array giving the section in the input file
   corresponding to the st_shndx field of each local symbol.

   The global hash table entry for the global symbols can be found
   via elf_sym_hashes (input_bfd).

   When generating relocatable output, this function must handle
   STB_LOCAL/STT_SECTION symbols specially.  The output symbol is
   going to be the section symbol corresponding to the output
   section, which means that the addend must be adjusted
   accordingly.  */

static int
rx_elf_relocate_section
    (bfd * output_bfd,
     struct bfd_link_info * info,
     bfd * input_bfd,
     asection * input_section,
     bfd_byte * contents,
     Elf_Internal_Rela * relocs,
     Elf_Internal_Sym * local_syms,
     asection ** local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  bool pid_mode;
  bool saw_subtract = false;
  const char *table_default_cache = NULL;
  bfd_vma table_start_cache = 0;
  bfd_vma table_end_cache = 0;

  if (elf_elfheader (output_bfd)->e_flags & E_FLAG_RX_PID)
    pid_mode = true;
  else
    pid_mode = false;

  symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = elf_sym_hashes (input_bfd);
  relend = relocs + input_section->reloc_count;

  for (rel = relocs; rel < relend; rel ++)
    {
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      const char * name = NULL;
      bool unresolved_reloc = true;
      int r_type;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_symndx = ELF32_R_SYM (rel->r_info);

      howto = rx_elf_howto_table + ELF32_R_TYPE (rel->r_info);
      h = NULL;
      sym = NULL;
      sec = NULL;
      relocation = 0;

      if (rx_stack_top == 0)
        saw_subtract = false;

      if (r_symndx < symtab_hdr->sh_info)
        {
          sym = local_syms + r_symndx;
          sec = local_sections [r_symndx];
          relocation = _bfd_elf_rela_local_sym (output_bfd, sym, & sec, rel);

          name = bfd_elf_string_from_elf_section
            (input_bfd, symtab_hdr->sh_link, sym->st_name);
          name = sym->st_name == 0 ? bfd_section_name (sec) : name;
        }
      else
        {
          bool warned, ignored;

          RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
                                   r_symndx, symtab_hdr, sym_hashes, h,
                                   sec, relocation, unresolved_reloc,
                                   warned, ignored);

          name = h->root.root.string;
        }

      if (startswith (name, "$tableentry$default$"))
        {
          bfd_vma entry_vma;
          int idx;
          char *buf;

          if (table_default_cache != name)
            {
              /* All relocs for a given table should be to the same
                 (weak) default symbol, so we can use it to detect a
                 cache miss.  We use the offset into the table to find
                 the "real" symbol.  Calculate and store the table's
                 offset here.  */
              table_default_cache = name;

              /* We have already done error checking in rx_table_find().  */
              buf = (char *) bfd_malloc (13 + strlen (name + 20));
              if (buf == NULL)
                return false;

              sprintf (buf, "$tablestart$%s", name + 20);
              table_start_cache = get_symbol_value (buf,
                                                    info,
                                                    input_bfd,
                                                    input_section,
                                                    rel->r_offset);

              sprintf (buf, "$tableend$%s", name + 20);
              table_end_cache = get_symbol_value (buf,
                                                  info,
                                                  input_bfd,
                                                  input_section,
                                                  rel->r_offset);

              free (buf);
            }

          entry_vma = (input_section->output_section->vma
                       + input_section->output_offset
                       + rel->r_offset);

          if (table_end_cache <= entry_vma || entry_vma < table_start_cache)
            {
              /* xgettext:c-format */
              _bfd_error_handler (_("%pB:%pA: table entry %s outside table"),
                                  input_bfd, input_section,
                                  name);
            }
          else if ((int) (entry_vma - table_start_cache) % 4)
            {
              /* xgettext:c-format */
              _bfd_error_handler (_("%pB:%pA: table entry %s not word-aligned within table"),
                                  input_bfd, input_section,
                                  name);
            }
          else
            {
              idx = (int) (entry_vma - table_start_cache) / 4;

              /* This will look like $tableentry$<N>$<name>.  */
              buf = (char *) bfd_malloc (12 + 20 + strlen (name + 20));
              if (buf == NULL)
                return false;

              sprintf (buf, "$tableentry$%d$%s", idx, name + 20);
              h = (struct elf_link_hash_entry *) bfd_link_hash_lookup (info->hash, buf, false, false, true);

              if (h)
                {
                  relocation = (h->root.u.def.value
                                + h->root.u.def.section->output_section->vma
                                + h->root.u.def.section->output_offset);
                }

              free (buf);
            }
        }
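
      /* Worked example (added for illustration, not in the original
         source): for a table named "foo" the scheme above relies on the
         linker-visible symbols

           $tablestart$foo            - start of the dispatch table
           $tableend$foo              - end of the dispatch table
           $tableentry$default$foo    - the weak default all entries point at
           $tableentry$<N>$foo        - the override for slot N, if any

         so a reloc whose entry sits 8 bytes past $tablestart$foo (slot 2)
         is redirected to $tableentry$2$foo when that symbol is defined.  */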

      if (sec != NULL && discarded_section (sec))
        RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
                                         rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
        {
          /* This is a relocatable link.  We don't have to change
             anything, unless the reloc is against a section symbol,
             in which case we have to adjust according to where the
             section symbol winds up in the output section.  */
          if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
            rel->r_addend += sec->output_offset;
          continue;
        }

      if (h != NULL && h->root.type == bfd_link_hash_undefweak)
        /* If the symbol is undefined and weak
           then the relocation resolves to zero.  */
        relocation = 0;
      else
        {
          if (howto->pc_relative)
            {
              relocation -= (input_section->output_section->vma
                             + input_section->output_offset
                             + rel->r_offset);
              if (r_type != R_RX_RH_3_PCREL
                  && r_type != R_RX_DIR3U_PCREL)
                relocation ++;
            }
          relocation += rel->r_addend;
        }

      r = bfd_reloc_ok;

#define RANGE(a,b) \
  if (a > (long) relocation || (long) relocation > b) \
    r = bfd_reloc_overflow
#define ALIGN(m) \
  if (relocation & m) \
    r = bfd_reloc_other
#define OP(i) \
  (contents[rel->r_offset + (i)])
#define WARN_REDHAT(type) \
  /* xgettext:c-format */ \
  _bfd_error_handler \
    (_("%pB:%pA: warning: deprecated Red Hat reloc " \
       "%s detected against: %s"), \
     input_bfd, input_section, #type, name)

      /* Check for unsafe relocs in PID mode.  These are any relocs where
         an absolute address is being computed.  There are special cases
         for relocs against symbols that are known to be referenced in
         crt0.o before the PID base address register has been initialised.  */
#define UNSAFE_FOR_PID \
  do \
    { \
      if (pid_mode \
          && sec != NULL \
          && sec->flags & SEC_READONLY \
          && !(input_section->flags & SEC_DEBUGGING) \
          && strcmp (name, "__pid_base") != 0 \
          && strcmp (name, "__gp") != 0 \
          && strcmp (name, "__romdatastart") != 0 \
          && !saw_subtract) \
        /* xgettext:c-format */ \
        _bfd_error_handler (_("%pB(%pA): unsafe PID relocation %s " \
                              "at %#" PRIx64 " (against %s in %s)"), \
                            input_bfd, input_section, howto->name, \
                            (uint64_t) (input_section->output_section->vma \
                                        + input_section->output_offset \
                                        + rel->r_offset), \
                            name, sec->name); \
    } \
  while (0)
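
      /* Example of the check above (added for illustration): when the
         output is linked with E_FLAG_RX_PID set, a reloc such as
         R_RX_DIR32 against a symbol in a read-only (SEC_READONLY) section
         computes an absolute address and so triggers the "unsafe PID
         relocation" warning, unless the symbol is one of __pid_base, __gp
         or __romdatastart, the value is part of a difference expression
         (saw_subtract), or the relocation is in a debugging section.  */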

      /* Opcode relocs are always big endian.  Data relocs are bi-endian.  */
      switch (r_type)
        {
        case R_RX_NONE:
          break;

        case R_RX_RH_RELAX:
          break;

        case R_RX_RH_3_PCREL:
          WARN_REDHAT ("RX_RH_3_PCREL");
          RANGE (3, 10);
          OP (0) &= 0xf8;
          OP (0) |= relocation & 0x07;
          break;

        case R_RX_RH_8_NEG:
          WARN_REDHAT ("RX_RH_8_NEG");
          relocation = - relocation;
          /* Fall through.  */
        case R_RX_DIR8S_PCREL:
          UNSAFE_FOR_PID;
          RANGE (-128, 127);
          OP (0) = relocation;
          break;

        case R_RX_DIR8S:
          UNSAFE_FOR_PID;
          RANGE (-128, 255);
          OP (0) = relocation;
          break;

        case R_RX_DIR8U:
          UNSAFE_FOR_PID;
          RANGE (0, 255);
          OP (0) = relocation;
          break;

        case R_RX_RH_16_NEG:
          WARN_REDHAT ("RX_RH_16_NEG");
          relocation = - relocation;
          /* Fall through.  */
        case R_RX_DIR16S_PCREL:
          UNSAFE_FOR_PID;
          RANGE (-32768, 32767);
#if RX_OPCODE_BIG_ENDIAN
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif
          break;

        case R_RX_RH_16_OP:
          WARN_REDHAT ("RX_RH_16_OP");
          UNSAFE_FOR_PID;
          RANGE (-32768, 32767);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif
          break;

        case R_RX_DIR16S:
          UNSAFE_FOR_PID;
          RANGE (-32768, 65535);
          if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
            {
              OP (1) = relocation;
              OP (0) = relocation >> 8;
            }
          else
            {
              OP (0) = relocation;
              OP (1) = relocation >> 8;
            }
          break;

        case R_RX_DIR16U:
          UNSAFE_FOR_PID;
          RANGE (0, 65536);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif
          break;

        case R_RX_DIR16:
          UNSAFE_FOR_PID;
          RANGE (-32768, 65536);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif
          break;

        case R_RX_DIR16_REV:
          UNSAFE_FOR_PID;
          RANGE (-32768, 65536);
#if RX_OPCODE_BIG_ENDIAN
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#else
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#endif
          break;

        case R_RX_DIR3U_PCREL:
          RANGE (3, 10);
          OP (0) &= 0xf8;
          OP (0) |= relocation & 0x07;
          break;

        case R_RX_RH_24_NEG:
          UNSAFE_FOR_PID;
          WARN_REDHAT ("RX_RH_24_NEG");
          relocation = - relocation;
          /* Fall through.  */
        case R_RX_DIR24S_PCREL:
          RANGE (-0x800000, 0x7fffff);
#if RX_OPCODE_BIG_ENDIAN
          OP (2) = relocation;
          OP (1) = relocation >> 8;
          OP (0) = relocation >> 16;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
#endif
          break;

        case R_RX_RH_24_OP:
          UNSAFE_FOR_PID;
          WARN_REDHAT ("RX_RH_24_OP");
          RANGE (-0x800000, 0x7fffff);
#if RX_OPCODE_BIG_ENDIAN
          OP (2) = relocation;
          OP (1) = relocation >> 8;
          OP (0) = relocation >> 16;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
#endif
          break;

        case R_RX_DIR24S:
          UNSAFE_FOR_PID;
          RANGE (-0x800000, 0x7fffff);
          if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
            {
              OP (2) = relocation;
              OP (1) = relocation >> 8;
              OP (0) = relocation >> 16;
            }
          else
            {
              OP (0) = relocation;
              OP (1) = relocation >> 8;
              OP (2) = relocation >> 16;
            }
          break;

        case R_RX_RH_24_UNS:
          UNSAFE_FOR_PID;
          WARN_REDHAT ("RX_RH_24_UNS");
          RANGE (0, 0xffffff);
#if RX_OPCODE_BIG_ENDIAN
          OP (2) = relocation;
          OP (1) = relocation >> 8;
          OP (0) = relocation >> 16;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
#endif
          break;

        case R_RX_RH_32_NEG:
          UNSAFE_FOR_PID;
          WARN_REDHAT ("RX_RH_32_NEG");
          relocation = - relocation;
#if RX_OPCODE_BIG_ENDIAN
          OP (3) = relocation;
          OP (2) = relocation >> 8;
          OP (1) = relocation >> 16;
          OP (0) = relocation >> 24;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
          OP (3) = relocation >> 24;
#endif
          break;

        case R_RX_RH_32_OP:
          UNSAFE_FOR_PID;
          WARN_REDHAT ("RX_RH_32_OP");
#if RX_OPCODE_BIG_ENDIAN
          OP (3) = relocation;
          OP (2) = relocation >> 8;
          OP (1) = relocation >> 16;
          OP (0) = relocation >> 24;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
          OP (3) = relocation >> 24;
#endif
          break;

        case R_RX_DIR32:
          if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
            {
              OP (3) = relocation;
              OP (2) = relocation >> 8;
              OP (1) = relocation >> 16;
              OP (0) = relocation >> 24;
            }
          else
            {
              OP (0) = relocation;
              OP (1) = relocation >> 8;
              OP (2) = relocation >> 16;
              OP (3) = relocation >> 24;
            }
          break;

        case R_RX_DIR32_REV:
          if (BIGE (output_bfd))
            {
              OP (0) = relocation;
              OP (1) = relocation >> 8;
              OP (2) = relocation >> 16;
              OP (3) = relocation >> 24;
            }
          else
            {
              OP (3) = relocation;
              OP (2) = relocation >> 8;
              OP (1) = relocation >> 16;
              OP (0) = relocation >> 24;
            }
          break;

        case R_RX_RH_DIFF:
          {
            bfd_vma val;
            WARN_REDHAT ("RX_RH_DIFF");
            val = bfd_get_32 (output_bfd, & OP (0));
            val -= relocation;
            bfd_put_32 (output_bfd, val, & OP (0));
          }
          break;

        case R_RX_RH_GPRELB:
          WARN_REDHAT ("RX_RH_GPRELB");
          relocation -= get_gp (info, input_bfd, input_section, rel->r_offset);
          RANGE (0, 65535);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif
          break;

        case R_RX_RH_GPRELW:
          WARN_REDHAT ("RX_RH_GPRELW");
          relocation -= get_gp (info, input_bfd, input_section, rel->r_offset);
          ALIGN (1);
          relocation >>= 1;
          RANGE (0, 65535);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif
          break;

        case R_RX_RH_GPRELL:
          WARN_REDHAT ("RX_RH_GPRELL");
          relocation -= get_gp (info, input_bfd, input_section, rel->r_offset);
          ALIGN (3);
          relocation >>= 2;
          RANGE (0, 65535);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif
          break;
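
        /* Added note (illustrative, not from the original source): the
           three GPREL cases above encode an offset from the
           linker-provided __gp symbol into a 16-bit field.  GPRELW and
           GPRELL additionally require 2- or 4-byte alignment and store
           the offset scaled down by 1 or 2 bits, so for GPRELL the
           encoded value is roughly

             (symbol + addend - __gp) >> 2,  which must lie in 0..65535;

           a misaligned value is flagged via ALIGN as bfd_reloc_other and
           reported as an unaligned small-data access.  */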

        /* Internal relocations just for relaxation:  */
        case R_RX_RH_ABS5p5B:
          RX_STACK_POP (relocation);
          RANGE (0, 31);
          OP (0) &= 0xf8;
          OP (0) |= relocation >> 2;
          OP (1) &= 0x77;
          OP (1) |= (relocation << 6) & 0x80;
          OP (1) |= (relocation << 3) & 0x08;
          break;

        case R_RX_RH_ABS5p5W:
          RX_STACK_POP (relocation);
          RANGE (0, 62);
          ALIGN (1);
          relocation >>= 1;
          OP (0) &= 0xf8;
          OP (0) |= relocation >> 2;
          OP (1) &= 0x77;
          OP (1) |= (relocation << 6) & 0x80;
          OP (1) |= (relocation << 3) & 0x08;
          break;

        case R_RX_RH_ABS5p5L:
          RX_STACK_POP (relocation);
          RANGE (0, 124);
          ALIGN (3);
          relocation >>= 2;
          OP (0) &= 0xf8;
          OP (0) |= relocation >> 2;
          OP (1) &= 0x77;
          OP (1) |= (relocation << 6) & 0x80;
          OP (1) |= (relocation << 3) & 0x08;
          break;

        case R_RX_RH_ABS5p8B:
          RX_STACK_POP (relocation);
          RANGE (0, 31);
          OP (0) &= 0x70;
          OP (0) |= (relocation << 3) & 0x80;
          OP (0) |= relocation & 0x0f;
          break;

        case R_RX_RH_ABS5p8W:
          RX_STACK_POP (relocation);
          RANGE (0, 62);
          ALIGN (1);
          relocation >>= 1;
          OP (0) &= 0x70;
          OP (0) |= (relocation << 3) & 0x80;
          OP (0) |= relocation & 0x0f;
          break;

        case R_RX_RH_ABS5p8L:
          RX_STACK_POP (relocation);
          RANGE (0, 124);
          ALIGN (3);
          relocation >>= 2;
          OP (0) &= 0x70;
          OP (0) |= (relocation << 3) & 0x80;
          OP (0) |= relocation & 0x0f;
          break;

        case R_RX_RH_UIMM4p8:
          RANGE (0, 15);
          OP (0) &= 0x0f;
          OP (0) |= relocation << 4;
          break;

        case R_RX_RH_UNEG4p8:
          RANGE (-15, 0);
          OP (0) &= 0x0f;
          OP (0) |= (-relocation) << 4;
          break;

        /* Complex reloc handling:  */
        case R_RX_ABS32:
          UNSAFE_FOR_PID;
          RX_STACK_POP (relocation);
#if RX_OPCODE_BIG_ENDIAN
          OP (3) = relocation;
          OP (2) = relocation >> 8;
          OP (1) = relocation >> 16;
          OP (0) = relocation >> 24;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
          OP (3) = relocation >> 24;
#endif
          break;

        case R_RX_ABS32_REV:
          UNSAFE_FOR_PID;
          RX_STACK_POP (relocation);
#if RX_OPCODE_BIG_ENDIAN
          OP (0) = relocation;
          OP (1) = relocation >> 8;
          OP (2) = relocation >> 16;
          OP (3) = relocation >> 24;
#else
          OP (3) = relocation;
          OP (2) = relocation >> 8;
          OP (1) = relocation >> 16;
          OP (0) = relocation >> 24;
#endif
          break;

        case R_RX_ABS24S_PCREL:
        case R_RX_ABS24S:
          UNSAFE_FOR_PID;
          RX_STACK_POP (relocation);
          RANGE (-0x800000, 0x7fffff);
          if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
            {
              OP (2) = relocation;
              OP (1) = relocation >> 8;
              OP (0) = relocation >> 16;
            }
          else
            {
              OP (0) = relocation;
              OP (1) = relocation >> 8;
              OP (2) = relocation >> 16;
            }
          break;

        case R_RX_ABS16:
          UNSAFE_FOR_PID;
          RX_STACK_POP (relocation);
          RANGE (-32768, 65535);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif
          break;

        case R_RX_ABS16_REV:
          UNSAFE_FOR_PID;
          RX_STACK_POP (relocation);
          RANGE (-32768, 65535);
#if RX_OPCODE_BIG_ENDIAN
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#else
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#endif
          break;

        case R_RX_ABS16S_PCREL:
        case R_RX_ABS16S:
          RX_STACK_POP (relocation);
          RANGE (-32768, 32767);
          if (BIGE (output_bfd) && !(input_section->flags & SEC_CODE))
            {
              OP (1) = relocation;
              OP (0) = relocation >> 8;
            }
          else
            {
              OP (0) = relocation;
              OP (1) = relocation >> 8;
            }
          break;

        case R_RX_ABS16U:
          UNSAFE_FOR_PID;
          RX_STACK_POP (relocation);
          RANGE (0, 65536);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif
          break;

        case R_RX_ABS16UL:
          UNSAFE_FOR_PID;
          RX_STACK_POP (relocation);
          relocation >>= 2;
          RANGE (0, 65536);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif
          break;

        case R_RX_ABS16UW:
          UNSAFE_FOR_PID;
          RX_STACK_POP (relocation);
          relocation >>= 1;
          RANGE (0, 65536);
#if RX_OPCODE_BIG_ENDIAN
          OP (1) = relocation;
          OP (0) = relocation >> 8;
#else
          OP (0) = relocation;
          OP (1) = relocation >> 8;
#endif
          break;

        case R_RX_ABS8:
          UNSAFE_FOR_PID;
          RX_STACK_POP (relocation);
          RANGE (-128, 255);
          OP (0) = relocation;
          break;

        case R_RX_ABS8U:
          UNSAFE_FOR_PID;
          RX_STACK_POP (relocation);
          RANGE (0, 255);
          OP (0) = relocation;
          break;

        case R_RX_ABS8UL:
          UNSAFE_FOR_PID;
          RX_STACK_POP (relocation);
          relocation >>= 2;
          RANGE (0, 255);
          OP (0) = relocation;
          break;

        case R_RX_ABS8UW:
          UNSAFE_FOR_PID;
          RX_STACK_POP (relocation);
          relocation >>= 1;
          RANGE (0, 255);
          OP (0) = relocation;
          break;

        case R_RX_ABS8S:
          UNSAFE_FOR_PID;
          /* Fall through.  */
        case R_RX_ABS8S_PCREL:
          RX_STACK_POP (relocation);
          RANGE (-128, 127);
          OP (0) = relocation;
          break;

        case R_RX_SYM:
          if (r_symndx < symtab_hdr->sh_info)
            RX_STACK_PUSH (sec->output_section->vma
                           + sec->output_offset
                           + sym->st_value
                           + rel->r_addend);
          else
            {
              if (h != NULL
                  && (h->root.type == bfd_link_hash_defined
                      || h->root.type == bfd_link_hash_defweak))
                RX_STACK_PUSH (h->root.u.def.value
                               + sec->output_section->vma
                               + sec->output_offset
                               + rel->r_addend);
              else
                _bfd_error_handler
                  (_("warning: RX_SYM reloc with an unknown symbol"));
            }
          break;

        case R_RX_OPneg:
          {
            int32_t tmp;

            saw_subtract = true;
            RX_STACK_POP (tmp);
            tmp = - tmp;
            RX_STACK_PUSH (tmp);
          }
          break;

        case R_RX_OPadd:
          {
            int32_t tmp1, tmp2;

            RX_STACK_POP (tmp1);
            RX_STACK_POP (tmp2);
            tmp1 += tmp2;
            RX_STACK_PUSH (tmp1);
          }
          break;

        case R_RX_OPsub:
          {
            int32_t tmp1, tmp2;

            saw_subtract = true;
            RX_STACK_POP (tmp1);
            RX_STACK_POP (tmp2);
            tmp2 -= tmp1;
            RX_STACK_PUSH (tmp2);
          }
          break;

        case R_RX_OPmul:
          {
            int32_t tmp1, tmp2;

            RX_STACK_POP (tmp1);
            RX_STACK_POP (tmp2);
            tmp1 *= tmp2;
            RX_STACK_PUSH (tmp1);
          }
          break;

        case R_RX_OPdiv:
          {
            int32_t tmp1, tmp2;

            RX_STACK_POP (tmp1);
            RX_STACK_POP (tmp2);
            tmp1 /= tmp2;
            RX_STACK_PUSH (tmp1);
          }
          break;

        case R_RX_OPshla:
          {
            int32_t tmp1, tmp2;

            RX_STACK_POP (tmp1);
            RX_STACK_POP (tmp2);
            tmp1 <<= tmp2;
            RX_STACK_PUSH (tmp1);
          }
          break;

        case R_RX_OPshra:
          {
            int32_t tmp1, tmp2;

            RX_STACK_POP (tmp1);
            RX_STACK_POP (tmp2);
            tmp1 >>= tmp2;
            RX_STACK_PUSH (tmp1);
          }
          break;

        case R_RX_OPsctsize:
          RX_STACK_PUSH (input_section->size);
          break;

        case R_RX_OPscttop:
          RX_STACK_PUSH (input_section->output_section->vma);
          break;

        case R_RX_OPand:
          {
            int32_t tmp1, tmp2;

            RX_STACK_POP (tmp1);
            RX_STACK_POP (tmp2);
            tmp1 &= tmp2;
            RX_STACK_PUSH (tmp1);
          }
          break;

        case R_RX_OPor:
          {
            int32_t tmp1, tmp2;

            RX_STACK_POP (tmp1);
            RX_STACK_POP (tmp2);
            tmp1 |= tmp2;
            RX_STACK_PUSH (tmp1);
          }
          break;

        case R_RX_OPxor:
          {
            int32_t tmp1, tmp2;

            RX_STACK_POP (tmp1);
            RX_STACK_POP (tmp2);
            tmp1 ^= tmp2;
            RX_STACK_PUSH (tmp1);
          }
          break;

        case R_RX_OPnot:
          {
            int32_t tmp;

            RX_STACK_POP (tmp);
            tmp = ~ tmp;
            RX_STACK_PUSH (tmp);
          }
          break;

        case R_RX_OPmod:
          {
            int32_t tmp1, tmp2;

            RX_STACK_POP (tmp1);
            RX_STACK_POP (tmp2);
            tmp1 %= tmp2;
            RX_STACK_PUSH (tmp1);
          }
          break;

        case R_RX_OPromtop:
          RX_STACK_PUSH (get_romstart (info, input_bfd, input_section, rel->r_offset));
          break;

        case R_RX_OPramtop:
          RX_STACK_PUSH (get_ramstart (info, input_bfd, input_section, rel->r_offset));
          break;

        default:
          r = bfd_reloc_notsupported;
          break;
        }

      if (r != bfd_reloc_ok)
        {
          const char * msg = NULL;

          switch (r)
            {
            case bfd_reloc_overflow:
              /* Catch the case of a missing function declaration
                 and emit a more helpful error message.  */
              if (r_type == R_RX_DIR24S_PCREL)
                /* xgettext:c-format */
                msg = _("%pB(%pA): error: call to undefined function '%s'");
              else
                (*info->callbacks->reloc_overflow)
                  (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
                   input_bfd, input_section, rel->r_offset);
              break;
            case bfd_reloc_undefined:
              (*info->callbacks->undefined_symbol)
                (info, name, input_bfd, input_section, rel->r_offset, true);
              break;
            case bfd_reloc_other:
              /* xgettext:c-format */
              msg = _("%pB(%pA): warning: unaligned access to symbol '%s' in the small data area");
              break;
            case bfd_reloc_outofrange:
              /* xgettext:c-format */
              msg = _("%pB(%pA): internal error: out of range error");
              break;
            case bfd_reloc_notsupported:
              /* xgettext:c-format */
              msg = _("%pB(%pA): internal error: unsupported relocation error");
              break;
            case bfd_reloc_dangerous:
              /* xgettext:c-format */
              msg = _("%pB(%pA): internal error: dangerous relocation");
              break;
            default:
              /* xgettext:c-format */
              msg = _("%pB(%pA): internal error: unknown error");
              break;
            }
          if (msg)
            _bfd_error_handler (msg, input_bfd, input_section, name);
        }
    }

  return true;
}
  1333. /* Relaxation Support. */
  1334. /* Progression of relocations from largest operand size to smallest
  1335. operand size. */
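/* For example, an operand carrying R_RX_DIR32 steps down through
   R_RX_DIR24S, R_RX_DIR16S and R_RX_DIR8S as its value shrinks; a
   result of R_RX_NONE means no smaller encoding is available.  Types
   not listed below are returned unchanged.  */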
  1336. static int
  1337. next_smaller_reloc (int r)
  1338. {
  1339. switch (r)
  1340. {
  1341. case R_RX_DIR32: return R_RX_DIR24S;
  1342. case R_RX_DIR24S: return R_RX_DIR16S;
  1343. case R_RX_DIR16S: return R_RX_DIR8S;
  1344. case R_RX_DIR8S: return R_RX_NONE;
  1345. case R_RX_DIR16: return R_RX_DIR8;
  1346. case R_RX_DIR8: return R_RX_NONE;
  1347. case R_RX_DIR16U: return R_RX_DIR8U;
  1348. case R_RX_DIR8U: return R_RX_NONE;
  1349. case R_RX_DIR24S_PCREL: return R_RX_DIR16S_PCREL;
  1350. case R_RX_DIR16S_PCREL: return R_RX_DIR8S_PCREL;
  1351. case R_RX_DIR8S_PCREL: return R_RX_DIR3U_PCREL;
  1352. case R_RX_DIR16UL: return R_RX_DIR8UL;
  1353. case R_RX_DIR8UL: return R_RX_NONE;
  1354. case R_RX_DIR16UW: return R_RX_DIR8UW;
  1355. case R_RX_DIR8UW: return R_RX_NONE;
  1356. case R_RX_RH_32_OP: return R_RX_RH_24_OP;
  1357. case R_RX_RH_24_OP: return R_RX_RH_16_OP;
  1358. case R_RX_RH_16_OP: return R_RX_DIR8;
  1359. case R_RX_ABS32: return R_RX_ABS24S;
  1360. case R_RX_ABS24S: return R_RX_ABS16S;
  1361. case R_RX_ABS16: return R_RX_ABS8;
  1362. case R_RX_ABS16U: return R_RX_ABS8U;
  1363. case R_RX_ABS16S: return R_RX_ABS8S;
  1364. case R_RX_ABS8: return R_RX_NONE;
  1365. case R_RX_ABS8U: return R_RX_NONE;
  1366. case R_RX_ABS8S: return R_RX_NONE;
  1367. case R_RX_ABS24S_PCREL: return R_RX_ABS16S_PCREL;
  1368. case R_RX_ABS16S_PCREL: return R_RX_ABS8S_PCREL;
  1369. case R_RX_ABS8S_PCREL: return R_RX_NONE;
  1370. case R_RX_ABS16UL: return R_RX_ABS8UL;
  1371. case R_RX_ABS16UW: return R_RX_ABS8UW;
  1372. case R_RX_ABS8UL: return R_RX_NONE;
  1373. case R_RX_ABS8UW: return R_RX_NONE;
  1374. }
  1375. return r;
1376. }
  1377. /* Delete some bytes from a section while relaxing. */
  1378. static bool
  1379. elf32_rx_relax_delete_bytes (bfd *abfd, asection *sec, bfd_vma addr, int count,
  1380. Elf_Internal_Rela *alignment_rel, int force_snip,
  1381. Elf_Internal_Rela *irelstart)
  1382. {
  1383. Elf_Internal_Shdr * symtab_hdr;
  1384. unsigned int sec_shndx;
  1385. bfd_byte * contents;
  1386. Elf_Internal_Rela * irel;
  1387. Elf_Internal_Rela * irelend;
  1388. Elf_Internal_Sym * isym;
  1389. Elf_Internal_Sym * isymend;
  1390. bfd_vma toaddr;
  1391. unsigned int symcount;
  1392. struct elf_link_hash_entry ** sym_hashes;
  1393. struct elf_link_hash_entry ** end_hashes;
  1394. if (!alignment_rel)
  1395. force_snip = 1;
  1396. sec_shndx = _bfd_elf_section_from_bfd_section (abfd, sec);
  1397. contents = elf_section_data (sec)->this_hdr.contents;
  1398. /* The deletion must stop at the next alignment boundary, if
  1399. ALIGNMENT_REL is non-NULL. */
  1400. toaddr = sec->size;
  1401. if (alignment_rel)
  1402. toaddr = alignment_rel->r_offset;
  1403. BFD_ASSERT (toaddr > addr);
  1404. /* Actually delete the bytes. */
  1405. memmove (contents + addr, contents + addr + count,
  1406. (size_t) (toaddr - addr - count));
  1407. /* If we don't have an alignment marker to worry about, we can just
  1408. shrink the section. Otherwise, we have to fill in the newly
  1409. created gap with NOP insns (0x03). */
  1410. if (force_snip)
  1411. sec->size -= count;
  1412. else
  1413. memset (contents + toaddr - count, 0x03, count);
  1414. irel = irelstart;
  1415. BFD_ASSERT (irel != NULL || sec->reloc_count == 0);
  1416. irelend = irel + sec->reloc_count;
  1417. /* Adjust all the relocs. */
  1418. for (; irel < irelend; irel++)
  1419. {
  1420. /* Get the new reloc address. */
  1421. if (irel->r_offset > addr
  1422. && (irel->r_offset < toaddr
  1423. || (force_snip && irel->r_offset == toaddr)))
  1424. irel->r_offset -= count;
  1425. /* If we see an ALIGN marker at the end of the gap, we move it
  1426. to the beginning of the gap, since marking these gaps is what
  1427. they're for. */
  1428. if (irel->r_offset == toaddr
  1429. && ELF32_R_TYPE (irel->r_info) == R_RX_RH_RELAX
  1430. && irel->r_addend & RX_RELAXA_ALIGN)
  1431. irel->r_offset -= count;
  1432. }
  1433. /* Adjust the local symbols defined in this section. */
  1434. symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
  1435. isym = (Elf_Internal_Sym *) symtab_hdr->contents;
  1436. isymend = isym + symtab_hdr->sh_info;
  1437. for (; isym < isymend; isym++)
  1438. {
  1439. /* If the symbol is in the range of memory we just moved, we
  1440. have to adjust its value. */
  1441. if (isym->st_shndx == sec_shndx
  1442. && isym->st_value > addr
  1443. && isym->st_value < toaddr)
  1444. isym->st_value -= count;
1445. /* If the symbol *spans* the bytes we just deleted (i.e. its
1446. *end* is in the moved bytes but its *start* isn't), then we
1447. must adjust its size. */
  1448. if (isym->st_shndx == sec_shndx
  1449. && isym->st_value < addr
  1450. && isym->st_value + isym->st_size > addr
  1451. && isym->st_value + isym->st_size < toaddr)
  1452. isym->st_size -= count;
  1453. }
  1454. /* Now adjust the global symbols defined in this section. */
  1455. symcount = (symtab_hdr->sh_size / sizeof (Elf32_External_Sym)
  1456. - symtab_hdr->sh_info);
  1457. sym_hashes = elf_sym_hashes (abfd);
  1458. end_hashes = sym_hashes + symcount;
  1459. for (; sym_hashes < end_hashes; sym_hashes++)
  1460. {
  1461. struct elf_link_hash_entry *sym_hash = *sym_hashes;
  1462. if ((sym_hash->root.type == bfd_link_hash_defined
  1463. || sym_hash->root.type == bfd_link_hash_defweak)
  1464. && sym_hash->root.u.def.section == sec)
  1465. {
  1466. /* As above, adjust the value if needed. */
  1467. if (sym_hash->root.u.def.value > addr
  1468. && sym_hash->root.u.def.value < toaddr)
  1469. sym_hash->root.u.def.value -= count;
  1470. /* As above, adjust the size if needed. */
  1471. if (sym_hash->root.u.def.value < addr
  1472. && sym_hash->root.u.def.value + sym_hash->size > addr
  1473. && sym_hash->root.u.def.value + sym_hash->size < toaddr)
  1474. sym_hash->size -= count;
  1475. }
  1476. }
  1477. return true;
  1478. }
  1479. /* Used to sort relocs by address. If relocs have the same address,
  1480. we maintain their relative order, except that R_RX_RH_RELAX
  1481. alignment relocs must be the first reloc for any given address. */
  1482. static void
  1483. reloc_bubblesort (Elf_Internal_Rela * r, int count)
  1484. {
  1485. int i;
  1486. bool again;
  1487. bool swappit;
  1488. /* This is almost a classic bubblesort. It's the slowest sort, but
  1489. we're taking advantage of the fact that the relocations are
  1490. mostly in order already (the assembler emits them that way) and
  1491. we need relocs with the same address to remain in the same
  1492. relative order. */
  1493. again = true;
  1494. while (again)
  1495. {
  1496. again = false;
  1497. for (i = 0; i < count - 1; i ++)
  1498. {
  1499. if (r[i].r_offset > r[i + 1].r_offset)
  1500. swappit = true;
  1501. else if (r[i].r_offset < r[i + 1].r_offset)
  1502. swappit = false;
  1503. else if (ELF32_R_TYPE (r[i + 1].r_info) == R_RX_RH_RELAX
  1504. && (r[i + 1].r_addend & RX_RELAXA_ALIGN))
  1505. swappit = true;
  1506. else if (ELF32_R_TYPE (r[i + 1].r_info) == R_RX_RH_RELAX
  1507. && (r[i + 1].r_addend & RX_RELAXA_ELIGN)
  1508. && !(ELF32_R_TYPE (r[i].r_info) == R_RX_RH_RELAX
  1509. && (r[i].r_addend & RX_RELAXA_ALIGN)))
  1510. swappit = true;
  1511. else
  1512. swappit = false;
  1513. if (swappit)
  1514. {
  1515. Elf_Internal_Rela tmp;
  1516. tmp = r[i];
  1517. r[i] = r[i + 1];
  1518. r[i + 1] = tmp;
  1519. /* If we do move a reloc back, re-scan to see if it
  1520. needs to be moved even further back. This avoids
  1521. most of the O(n^2) behavior for our cases. */
  1522. if (i > 0)
  1523. i -= 2;
  1524. again = true;
  1525. }
  1526. }
  1527. }
  1528. }
  1529. #define OFFSET_FOR_RELOC(rel, lrel, scale) \
  1530. rx_offset_for_reloc (abfd, rel + 1, symtab_hdr, shndx_buf, intsyms, \
  1531. lrel, abfd, sec, link_info, scale)
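/* Complex operand expressions are encoded as a chain of relocations at
   the same offset: R_RX_SYM entries push symbol values onto a small
   evaluation stack, the R_RX_OP* entries combine them, and a final
   operand reloc (R_RX_ABS16UL, for instance) pops the result and says
   how it is applied.  rx_offset_for_reloc evaluates such a chain and
   reports the last reloc consumed through LREL so the caller can step
   past the whole group.  */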
  1532. static bfd_vma
  1533. rx_offset_for_reloc (bfd * abfd,
  1534. Elf_Internal_Rela * rel,
  1535. Elf_Internal_Shdr * symtab_hdr,
  1536. bfd_byte * shndx_buf ATTRIBUTE_UNUSED,
  1537. Elf_Internal_Sym * intsyms,
  1538. Elf_Internal_Rela ** lrel,
  1539. bfd * input_bfd,
  1540. asection * input_section,
  1541. struct bfd_link_info * info,
  1542. int * scale)
  1543. {
  1544. bfd_vma symval;
  1545. bfd_reloc_status_type r;
  1546. *scale = 1;
  1547. /* REL is the first of 1..N relocations. We compute the symbol
  1548. value for each relocation, then combine them if needed. LREL
  1549. gets a pointer to the last relocation used. */
  1550. while (1)
  1551. {
  1552. int32_t tmp1, tmp2;
  1553. /* Get the value of the symbol referred to by the reloc. */
  1554. if (ELF32_R_SYM (rel->r_info) < symtab_hdr->sh_info)
  1555. {
  1556. /* A local symbol. */
  1557. Elf_Internal_Sym *isym;
  1558. asection *ssec;
  1559. isym = intsyms + ELF32_R_SYM (rel->r_info);
  1560. if (isym->st_shndx == SHN_UNDEF)
  1561. ssec = bfd_und_section_ptr;
  1562. else if (isym->st_shndx == SHN_ABS)
  1563. ssec = bfd_abs_section_ptr;
  1564. else if (isym->st_shndx == SHN_COMMON)
  1565. ssec = bfd_com_section_ptr;
  1566. else
  1567. ssec = bfd_section_from_elf_index (abfd,
  1568. isym->st_shndx);
  1569. /* Initial symbol value. */
  1570. symval = isym->st_value;
  1571. /* GAS may have made this symbol relative to a section, in
  1572. which case, we have to add the addend to find the
  1573. symbol. */
  1574. if (ELF_ST_TYPE (isym->st_info) == STT_SECTION)
  1575. symval += rel->r_addend;
  1576. if (ssec)
  1577. {
  1578. if ((ssec->flags & SEC_MERGE)
  1579. && ssec->sec_info_type == SEC_INFO_TYPE_MERGE)
  1580. symval = _bfd_merged_section_offset (abfd, & ssec,
  1581. elf_section_data (ssec)->sec_info,
  1582. symval);
  1583. }
  1584. /* Now make the offset relative to where the linker is putting it. */
  1585. if (ssec)
  1586. symval +=
  1587. ssec->output_section->vma + ssec->output_offset;
  1588. symval += rel->r_addend;
  1589. }
  1590. else
  1591. {
  1592. unsigned long indx;
  1593. struct elf_link_hash_entry * h;
  1594. /* An external symbol. */
  1595. indx = ELF32_R_SYM (rel->r_info) - symtab_hdr->sh_info;
  1596. h = elf_sym_hashes (abfd)[indx];
  1597. BFD_ASSERT (h != NULL);
  1598. if (h->root.type != bfd_link_hash_defined
  1599. && h->root.type != bfd_link_hash_defweak)
  1600. {
  1601. /* This appears to be a reference to an undefined
  1602. symbol. Just ignore it--it will be caught by the
  1603. regular reloc processing. */
  1604. if (lrel)
  1605. *lrel = rel;
  1606. return 0;
  1607. }
  1608. symval = (h->root.u.def.value
  1609. + h->root.u.def.section->output_section->vma
  1610. + h->root.u.def.section->output_offset);
  1611. symval += rel->r_addend;
  1612. }
  1613. switch (ELF32_R_TYPE (rel->r_info))
  1614. {
  1615. case R_RX_SYM:
  1616. RX_STACK_PUSH (symval);
  1617. break;
  1618. case R_RX_OPneg:
  1619. RX_STACK_POP (tmp1);
  1620. tmp1 = - tmp1;
  1621. RX_STACK_PUSH (tmp1);
  1622. break;
  1623. case R_RX_OPadd:
  1624. RX_STACK_POP (tmp1);
  1625. RX_STACK_POP (tmp2);
  1626. tmp1 += tmp2;
  1627. RX_STACK_PUSH (tmp1);
  1628. break;
  1629. case R_RX_OPsub:
  1630. RX_STACK_POP (tmp1);
  1631. RX_STACK_POP (tmp2);
  1632. tmp2 -= tmp1;
  1633. RX_STACK_PUSH (tmp2);
  1634. break;
  1635. case R_RX_OPmul:
  1636. RX_STACK_POP (tmp1);
  1637. RX_STACK_POP (tmp2);
  1638. tmp1 *= tmp2;
  1639. RX_STACK_PUSH (tmp1);
  1640. break;
  1641. case R_RX_OPdiv:
  1642. RX_STACK_POP (tmp1);
  1643. RX_STACK_POP (tmp2);
  1644. tmp1 /= tmp2;
  1645. RX_STACK_PUSH (tmp1);
  1646. break;
  1647. case R_RX_OPshla:
  1648. RX_STACK_POP (tmp1);
  1649. RX_STACK_POP (tmp2);
  1650. tmp1 <<= tmp2;
  1651. RX_STACK_PUSH (tmp1);
  1652. break;
  1653. case R_RX_OPshra:
  1654. RX_STACK_POP (tmp1);
  1655. RX_STACK_POP (tmp2);
  1656. tmp1 >>= tmp2;
  1657. RX_STACK_PUSH (tmp1);
  1658. break;
  1659. case R_RX_OPsctsize:
  1660. RX_STACK_PUSH (input_section->size);
  1661. break;
  1662. case R_RX_OPscttop:
  1663. RX_STACK_PUSH (input_section->output_section->vma);
  1664. break;
  1665. case R_RX_OPand:
  1666. RX_STACK_POP (tmp1);
  1667. RX_STACK_POP (tmp2);
  1668. tmp1 &= tmp2;
  1669. RX_STACK_PUSH (tmp1);
  1670. break;
  1671. case R_RX_OPor:
  1672. RX_STACK_POP (tmp1);
  1673. RX_STACK_POP (tmp2);
  1674. tmp1 |= tmp2;
  1675. RX_STACK_PUSH (tmp1);
  1676. break;
  1677. case R_RX_OPxor:
  1678. RX_STACK_POP (tmp1);
  1679. RX_STACK_POP (tmp2);
  1680. tmp1 ^= tmp2;
  1681. RX_STACK_PUSH (tmp1);
  1682. break;
  1683. case R_RX_OPnot:
  1684. RX_STACK_POP (tmp1);
  1685. tmp1 = ~ tmp1;
  1686. RX_STACK_PUSH (tmp1);
  1687. break;
  1688. case R_RX_OPmod:
  1689. RX_STACK_POP (tmp1);
  1690. RX_STACK_POP (tmp2);
  1691. tmp1 %= tmp2;
  1692. RX_STACK_PUSH (tmp1);
  1693. break;
  1694. case R_RX_OPromtop:
  1695. RX_STACK_PUSH (get_romstart (info, input_bfd, input_section, rel->r_offset));
  1696. break;
  1697. case R_RX_OPramtop:
  1698. RX_STACK_PUSH (get_ramstart (info, input_bfd, input_section, rel->r_offset));
  1699. break;
  1700. case R_RX_DIR16UL:
  1701. case R_RX_DIR8UL:
  1702. case R_RX_ABS16UL:
  1703. case R_RX_ABS8UL:
  1704. if (rx_stack_top)
  1705. RX_STACK_POP (symval);
  1706. if (lrel)
  1707. *lrel = rel;
  1708. *scale = 4;
  1709. return symval;
  1710. case R_RX_DIR16UW:
  1711. case R_RX_DIR8UW:
  1712. case R_RX_ABS16UW:
  1713. case R_RX_ABS8UW:
  1714. if (rx_stack_top)
  1715. RX_STACK_POP (symval);
  1716. if (lrel)
  1717. *lrel = rel;
  1718. *scale = 2;
  1719. return symval;
  1720. default:
  1721. if (rx_stack_top)
  1722. RX_STACK_POP (symval);
  1723. if (lrel)
  1724. *lrel = rel;
  1725. return symval;
  1726. }
  1727. rel ++;
  1728. }
  1729. /* FIXME. */
  1730. (void) r;
  1731. }
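/* Adjust the offsets of the relocs after IREL, up to and including SREL,
   that share SREL's original offset by DELTA bytes.  Used when relaxation
   rewrites an opcode so that an operand (and hence its reloc) ends up at
   a different position within the insn.  */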
  1732. static void
  1733. move_reloc (Elf_Internal_Rela * irel, Elf_Internal_Rela * srel, int delta)
  1734. {
  1735. bfd_vma old_offset = srel->r_offset;
  1736. irel ++;
  1737. while (irel <= srel)
  1738. {
  1739. if (irel->r_offset == old_offset)
  1740. irel->r_offset += delta;
  1741. irel ++;
  1742. }
  1743. }
  1744. /* Relax one section. */
  1745. static bool
  1746. elf32_rx_relax_section (bfd *abfd,
  1747. asection *sec,
  1748. struct bfd_link_info *link_info,
  1749. bool *again,
  1750. bool allow_pcrel3)
  1751. {
  1752. Elf_Internal_Shdr *symtab_hdr;
  1753. Elf_Internal_Shdr *shndx_hdr;
  1754. Elf_Internal_Rela *internal_relocs;
  1755. Elf_Internal_Rela *irel;
  1756. Elf_Internal_Rela *srel;
  1757. Elf_Internal_Rela *irelend;
  1758. Elf_Internal_Rela *next_alignment;
  1759. Elf_Internal_Rela *prev_alignment;
  1760. bfd_byte *contents = NULL;
  1761. bfd_byte *free_contents = NULL;
  1762. Elf_Internal_Sym *intsyms = NULL;
  1763. Elf_Internal_Sym *free_intsyms = NULL;
  1764. bfd_byte *shndx_buf = NULL;
  1765. bfd_vma pc;
  1766. bfd_vma sec_start;
  1767. bfd_vma symval = 0;
  1768. int pcrel = 0;
  1769. int code = 0;
  1770. int section_alignment_glue;
  1771. /* how much to scale the relocation by - 1, 2, or 4. */
  1772. int scale;
  1773. /* Assume nothing changes. */
  1774. *again = false;
  1775. /* We don't have to do anything for a relocatable link, if
  1776. this section does not have relocs, or if this is not a
  1777. code section. */
  1778. if (bfd_link_relocatable (link_info)
  1779. || (sec->flags & SEC_RELOC) == 0
  1780. || sec->reloc_count == 0
  1781. || (sec->flags & SEC_CODE) == 0)
  1782. return true;
  1783. symtab_hdr = & elf_symtab_hdr (abfd);
  1784. if (elf_symtab_shndx_list (abfd))
  1785. shndx_hdr = & elf_symtab_shndx_list (abfd)->hdr;
  1786. else
  1787. shndx_hdr = NULL;
  1788. sec_start = sec->output_section->vma + sec->output_offset;
  1789. /* Get the section contents. */
  1790. if (elf_section_data (sec)->this_hdr.contents != NULL)
  1791. contents = elf_section_data (sec)->this_hdr.contents;
  1792. /* Go get them off disk. */
  1793. else
  1794. {
  1795. if (! bfd_malloc_and_get_section (abfd, sec, &contents))
  1796. goto error_return;
  1797. elf_section_data (sec)->this_hdr.contents = contents;
  1798. }
  1799. /* Read this BFD's symbols. */
  1800. /* Get cached copy if it exists. */
  1801. if (symtab_hdr->contents != NULL)
  1802. intsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
  1803. else
  1804. {
  1805. intsyms = bfd_elf_get_elf_syms (abfd, symtab_hdr, symtab_hdr->sh_info, 0, NULL, NULL, NULL);
  1806. symtab_hdr->contents = (bfd_byte *) intsyms;
  1807. }
  1808. if (shndx_hdr && shndx_hdr->sh_size != 0)
  1809. {
  1810. size_t amt;
  1811. if (_bfd_mul_overflow (symtab_hdr->sh_info,
  1812. sizeof (Elf_External_Sym_Shndx), &amt))
  1813. {
  1814. bfd_set_error (bfd_error_file_too_big);
  1815. goto error_return;
  1816. }
  1817. if (bfd_seek (abfd, shndx_hdr->sh_offset, SEEK_SET) != 0)
  1818. goto error_return;
  1819. shndx_buf = _bfd_malloc_and_read (abfd, amt, amt);
  1820. if (shndx_buf == NULL)
  1821. goto error_return;
  1822. shndx_hdr->contents = shndx_buf;
  1823. }
  1824. /* Get a copy of the native relocations. */
  1825. /* Note - we ignore the setting of link_info->keep_memory when reading
  1826. in these relocs. We have to maintain a permanent copy of the relocs
  1827. because we are going to walk over them multiple times, adjusting them
  1828. as bytes are deleted from the section, and with this relaxation
  1829. function itself being called multiple times on the same section... */
  1830. internal_relocs = _bfd_elf_link_read_relocs
  1831. (abfd, sec, NULL, (Elf_Internal_Rela *) NULL, true);
  1832. if (internal_relocs == NULL)
  1833. goto error_return;
  1834. /* The RL_ relocs must be just before the operand relocs they go
  1835. with, so we must sort them to guarantee this. We use bubblesort
  1836. instead of qsort so we can guarantee that relocs with the same
  1837. address remain in the same relative order. */
  1838. reloc_bubblesort (internal_relocs, sec->reloc_count);
  1839. /* Walk through them looking for relaxing opportunities. */
  1840. irelend = internal_relocs + sec->reloc_count;
  1841. /* This will either be NULL or a pointer to the next alignment
  1842. relocation. */
  1843. next_alignment = internal_relocs;
  1844. /* This will be the previous alignment, although at first it points
  1845. to the first real relocation. */
  1846. prev_alignment = internal_relocs;
  1847. /* We calculate worst case shrinkage caused by alignment directives.
1848. Not fool-proof, but better than either ignoring the problem or
  1849. doing heavy duty analysis of all the alignment markers in all
  1850. input sections. */
  1851. section_alignment_glue = 0;
  1852. for (irel = internal_relocs; irel < irelend; irel++)
  1853. if (ELF32_R_TYPE (irel->r_info) == R_RX_RH_RELAX
  1854. && irel->r_addend & RX_RELAXA_ALIGN)
  1855. {
  1856. int this_glue = 1 << (irel->r_addend & RX_RELAXA_ANUM);
  1857. if (section_alignment_glue < this_glue)
  1858. section_alignment_glue = this_glue;
  1859. }
  1860. /* Worst case is all 0..N alignments, in order, causing 2*N-1 byte
  1861. shrinkage. */
  1862. section_alignment_glue *= 2;
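/* Worked example: if the largest alignment marker in this section asks
   for 2^3 = 8 byte alignment, this_glue is 8 and section_alignment_glue
   becomes 16; branches whose target lies beyond an alignment boundary
   must keep at least that much slack before we pick a shorter
   displacement.  */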
  1863. for (irel = internal_relocs; irel < irelend; irel++)
  1864. {
  1865. unsigned char *insn;
  1866. int nrelocs;
  1867. /* The insns we care about are all marked with one of these. */
  1868. if (ELF32_R_TYPE (irel->r_info) != R_RX_RH_RELAX)
  1869. continue;
  1870. if (irel->r_addend & RX_RELAXA_ALIGN
  1871. || next_alignment == internal_relocs)
  1872. {
  1873. /* When we delete bytes, we need to maintain all the alignments
  1874. indicated. In addition, we need to be careful about relaxing
  1875. jumps across alignment boundaries - these displacements
  1876. *grow* when we delete bytes. For now, don't shrink
  1877. displacements across an alignment boundary, just in case.
  1878. Note that this only affects relocations to the same
  1879. section. */
  1880. prev_alignment = next_alignment;
  1881. next_alignment += 2;
  1882. while (next_alignment < irelend
  1883. && (ELF32_R_TYPE (next_alignment->r_info) != R_RX_RH_RELAX
  1884. || !(next_alignment->r_addend & RX_RELAXA_ELIGN)))
  1885. next_alignment ++;
  1886. if (next_alignment >= irelend || next_alignment->r_offset == 0)
  1887. next_alignment = NULL;
  1888. }
  1889. /* When we hit alignment markers, see if we've shrunk enough
  1890. before them to reduce the gap without violating the alignment
  1891. requirements. */
  1892. if (irel->r_addend & RX_RELAXA_ALIGN)
  1893. {
  1894. /* At this point, the next relocation *should* be the ELIGN
  1895. end marker. */
  1896. Elf_Internal_Rela *erel = irel + 1;
  1897. unsigned int alignment, nbytes;
  1898. if (ELF32_R_TYPE (erel->r_info) != R_RX_RH_RELAX)
  1899. continue;
  1900. if (!(erel->r_addend & RX_RELAXA_ELIGN))
  1901. continue;
  1902. alignment = 1 << (irel->r_addend & RX_RELAXA_ANUM);
  1903. if (erel->r_offset - irel->r_offset < alignment)
  1904. continue;
  1905. nbytes = erel->r_offset - irel->r_offset;
  1906. nbytes /= alignment;
  1907. nbytes *= alignment;
  1908. elf32_rx_relax_delete_bytes (abfd, sec, erel->r_offset-nbytes, nbytes, next_alignment,
  1909. erel->r_offset == sec->size, internal_relocs);
  1910. *again = true;
  1911. continue;
  1912. }
  1913. if (irel->r_addend & RX_RELAXA_ELIGN)
  1914. continue;
  1915. insn = contents + irel->r_offset;
  1916. nrelocs = irel->r_addend & RX_RELAXA_RNUM;
  1917. /* At this point, we have an insn that is a candidate for linker
  1918. relaxation. There are NRELOCS relocs following that may be
  1919. relaxed, although each reloc may be made of more than one
  1920. reloc entry (such as gp-rel symbols). */
  1921. /* Get the value of the symbol referred to by the reloc. Just
  1922. in case this is the last reloc in the list, use the RL's
  1923. addend to choose between this reloc (no addend) or the next
  1924. (yes addend, which means at least one following reloc). */
1925. /* srel points to the "current" relocation for this insn -
  1926. actually the last reloc for a given operand, which is the one
  1927. we need to update. We check the relaxations in the same
  1928. order that the relocations happen, so we'll just push it
  1929. along as we go. */
  1930. srel = irel;
  1931. pc = sec->output_section->vma + sec->output_offset
  1932. + srel->r_offset;
  1933. #define GET_RELOC \
  1934. symval = OFFSET_FOR_RELOC (srel, &srel, &scale); \
  1935. pcrel = symval - pc + srel->r_addend; \
  1936. nrelocs --;
  1937. #define SNIPNR(offset, nbytes) \
  1938. elf32_rx_relax_delete_bytes (abfd, sec, (insn - contents) + offset, nbytes, next_alignment, 0, internal_relocs);
  1939. #define SNIP(offset, nbytes, newtype) \
  1940. SNIPNR (offset, nbytes); \
  1941. srel->r_info = ELF32_R_INFO (ELF32_R_SYM (srel->r_info), newtype)
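/* GET_RELOC advances SREL to the last reloc making up the next operand,
   leaving its value in SYMVAL (scaled according to SCALE) and its
   PC-relative distance in PCREL.  SNIPNR deletes NBYTES bytes at OFFSET
   within the insn; SNIP additionally retypes the operand's reloc to
   NEWTYPE.  */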
  1942. /* The order of these bit tests must match the order that the
  1943. relocs appear in. Since we sorted those by offset, we can
  1944. predict them. */
1945. /* Note that the numbers in, say, DSP6 are the bit offsets of
1946. the code fields that describe the operand. Bit numbering starts
1947. at 0 for the MSB of insn[0]. */
  1948. /* DSP* codes:
  1949. 0 00 [reg]
  1950. 1 01 dsp:8[reg]
  1951. 2 10 dsp:16[reg]
  1952. 3 11 reg */
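/* For example, a DSP6 operand with code 2 (dsp:16) whose scaled
   displacement fits in 8 bits is rewritten to code 1 (dsp:8), one
   displacement byte is snipped and the reloc is stepped down via
   next_smaller_reloc; a dsp:8 operand of exactly zero drops to code 0
   ([reg]) and loses its displacement byte altogether.  */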
  1953. if (irel->r_addend & RX_RELAXA_DSP6)
  1954. {
  1955. GET_RELOC;
  1956. code = insn[0] & 3;
  1957. if (code == 2 && symval/scale <= 255)
  1958. {
  1959. unsigned int newrel = ELF32_R_TYPE (srel->r_info);
  1960. insn[0] &= 0xfc;
  1961. insn[0] |= 0x01;
  1962. newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
  1963. if (newrel != ELF32_R_TYPE (srel->r_info))
  1964. {
  1965. SNIP (3, 1, newrel);
  1966. *again = true;
  1967. }
  1968. }
  1969. else if (code == 1 && symval == 0)
  1970. {
  1971. insn[0] &= 0xfc;
  1972. SNIP (2, 1, R_RX_NONE);
  1973. *again = true;
  1974. }
  1975. /* Special case DSP:5 format: MOV.bwl dsp:5[Rsrc],Rdst. */
  1976. else if (code == 1 && symval/scale <= 31
  1977. /* Decodable bits. */
  1978. && (insn[0] & 0xcc) == 0xcc
  1979. /* Width. */
  1980. && (insn[0] & 0x30) != 0x30
  1981. /* Register MSBs. */
  1982. && (insn[1] & 0x88) == 0x00)
  1983. {
  1984. int newrel = 0;
  1985. insn[0] = 0x88 | (insn[0] & 0x30);
  1986. /* The register fields are in the right place already. */
  1987. /* We can't relax this new opcode. */
  1988. irel->r_addend = 0;
  1989. switch ((insn[0] & 0x30) >> 4)
  1990. {
  1991. case 0:
  1992. newrel = R_RX_RH_ABS5p5B;
  1993. break;
  1994. case 1:
  1995. newrel = R_RX_RH_ABS5p5W;
  1996. break;
  1997. case 2:
  1998. newrel = R_RX_RH_ABS5p5L;
  1999. break;
  2000. }
  2001. move_reloc (irel, srel, -2);
  2002. SNIP (2, 1, newrel);
  2003. }
  2004. /* Special case DSP:5 format: MOVU.bw dsp:5[Rsrc],Rdst. */
  2005. else if (code == 1 && symval/scale <= 31
  2006. /* Decodable bits. */
  2007. && (insn[0] & 0xf8) == 0x58
  2008. /* Register MSBs. */
  2009. && (insn[1] & 0x88) == 0x00)
  2010. {
  2011. int newrel = 0;
  2012. insn[0] = 0xb0 | ((insn[0] & 0x04) << 1);
  2013. /* The register fields are in the right place already. */
  2014. /* We can't relax this new opcode. */
  2015. irel->r_addend = 0;
  2016. switch ((insn[0] & 0x08) >> 3)
  2017. {
  2018. case 0:
  2019. newrel = R_RX_RH_ABS5p5B;
  2020. break;
  2021. case 1:
  2022. newrel = R_RX_RH_ABS5p5W;
  2023. break;
  2024. }
  2025. move_reloc (irel, srel, -2);
  2026. SNIP (2, 1, newrel);
  2027. }
  2028. }
  2029. /* A DSP4 operand always follows a DSP6 operand, even if there's
  2030. no relocation for it. We have to read the code out of the
  2031. opcode to calculate the offset of the operand. */
  2032. if (irel->r_addend & RX_RELAXA_DSP4)
  2033. {
  2034. int code6, offset = 0;
  2035. GET_RELOC;
  2036. code6 = insn[0] & 0x03;
  2037. switch (code6)
  2038. {
  2039. case 0: offset = 2; break;
  2040. case 1: offset = 3; break;
  2041. case 2: offset = 4; break;
  2042. case 3: offset = 2; break;
  2043. }
  2044. code = (insn[0] & 0x0c) >> 2;
  2045. if (code == 2 && symval / scale <= 255)
  2046. {
  2047. unsigned int newrel = ELF32_R_TYPE (srel->r_info);
  2048. insn[0] &= 0xf3;
  2049. insn[0] |= 0x04;
  2050. newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
  2051. if (newrel != ELF32_R_TYPE (srel->r_info))
  2052. {
  2053. SNIP (offset+1, 1, newrel);
  2054. *again = true;
  2055. }
  2056. }
  2057. else if (code == 1 && symval == 0)
  2058. {
  2059. insn[0] &= 0xf3;
  2060. SNIP (offset, 1, R_RX_NONE);
  2061. *again = true;
  2062. }
  2063. /* Special case DSP:5 format: MOV.bwl Rsrc,dsp:5[Rdst] */
  2064. else if (code == 1 && symval/scale <= 31
  2065. /* Decodable bits. */
  2066. && (insn[0] & 0xc3) == 0xc3
  2067. /* Width. */
  2068. && (insn[0] & 0x30) != 0x30
  2069. /* Register MSBs. */
  2070. && (insn[1] & 0x88) == 0x00)
  2071. {
  2072. int newrel = 0;
  2073. insn[0] = 0x80 | (insn[0] & 0x30);
  2074. /* The register fields are in the right place already. */
  2075. /* We can't relax this new opcode. */
  2076. irel->r_addend = 0;
  2077. switch ((insn[0] & 0x30) >> 4)
  2078. {
  2079. case 0:
  2080. newrel = R_RX_RH_ABS5p5B;
  2081. break;
  2082. case 1:
  2083. newrel = R_RX_RH_ABS5p5W;
  2084. break;
  2085. case 2:
  2086. newrel = R_RX_RH_ABS5p5L;
  2087. break;
  2088. }
  2089. move_reloc (irel, srel, -2);
  2090. SNIP (2, 1, newrel);
  2091. }
  2092. }
  2093. /* These always occur alone, but the offset depends on whether
  2094. it's a MEMEX opcode (0x06) or not. */
  2095. if (irel->r_addend & RX_RELAXA_DSP14)
  2096. {
  2097. int offset;
  2098. GET_RELOC;
  2099. if (insn[0] == 0x06)
  2100. offset = 3;
  2101. else
  2102. offset = 4;
  2103. code = insn[1] & 3;
  2104. if (code == 2 && symval / scale <= 255)
  2105. {
  2106. unsigned int newrel = ELF32_R_TYPE (srel->r_info);
  2107. insn[1] &= 0xfc;
  2108. insn[1] |= 0x01;
  2109. newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
  2110. if (newrel != ELF32_R_TYPE (srel->r_info))
  2111. {
  2112. SNIP (offset, 1, newrel);
  2113. *again = true;
  2114. }
  2115. }
  2116. else if (code == 1 && symval == 0)
  2117. {
  2118. insn[1] &= 0xfc;
  2119. SNIP (offset, 1, R_RX_NONE);
  2120. *again = true;
  2121. }
  2122. }
  2123. /* IMM* codes:
  2124. 0 00 imm:32
  2125. 1 01 simm:8
  2126. 2 10 simm:16
  2127. 3 11 simm:24. */
  2128. /* These always occur alone. */
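/* For example, an imm:32 operand (code 0) whose signed value fits in 24
   bits is rewritten to code 3 (simm:24) and one byte is snipped; later
   passes can shrink it further to simm:16 or simm:8 as the value
   allows.  */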
  2129. if (irel->r_addend & RX_RELAXA_IMM6)
  2130. {
  2131. long ssymval;
  2132. GET_RELOC;
  2133. /* These relocations sign-extend, so we must do signed compares. */
  2134. ssymval = (long) symval;
  2135. code = insn[0] & 0x03;
  2136. if (code == 0 && ssymval <= 8388607 && ssymval >= -8388608)
  2137. {
  2138. unsigned int newrel = ELF32_R_TYPE (srel->r_info);
  2139. insn[0] &= 0xfc;
  2140. insn[0] |= 0x03;
  2141. newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
  2142. if (newrel != ELF32_R_TYPE (srel->r_info))
  2143. {
  2144. SNIP (2, 1, newrel);
  2145. *again = true;
  2146. }
  2147. }
  2148. else if (code == 3 && ssymval <= 32767 && ssymval >= -32768)
  2149. {
  2150. unsigned int newrel = ELF32_R_TYPE (srel->r_info);
  2151. insn[0] &= 0xfc;
  2152. insn[0] |= 0x02;
  2153. newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
  2154. if (newrel != ELF32_R_TYPE (srel->r_info))
  2155. {
  2156. SNIP (2, 1, newrel);
  2157. *again = true;
  2158. }
  2159. }
  2160. /* Special case UIMM8 format: CMP #uimm8,Rdst. */
  2161. else if (code == 2 && ssymval <= 255 && ssymval >= 16
  2162. /* Decodable bits. */
  2163. && (insn[0] & 0xfc) == 0x74
  2164. /* Decodable bits. */
  2165. && ((insn[1] & 0xf0) == 0x00))
  2166. {
  2167. int newrel;
  2168. insn[0] = 0x75;
  2169. insn[1] = 0x50 | (insn[1] & 0x0f);
  2170. /* We can't relax this new opcode. */
  2171. irel->r_addend = 0;
  2172. if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
  2173. newrel = R_RX_ABS8U;
  2174. else
  2175. newrel = R_RX_DIR8U;
  2176. SNIP (2, 1, newrel);
  2177. *again = true;
  2178. }
  2179. else if (code == 2 && ssymval <= 127 && ssymval >= -128)
  2180. {
  2181. unsigned int newrel = ELF32_R_TYPE (srel->r_info);
  2182. insn[0] &= 0xfc;
  2183. insn[0] |= 0x01;
  2184. newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
  2185. if (newrel != ELF32_R_TYPE (srel->r_info))
  2186. {
  2187. SNIP (2, 1, newrel);
  2188. *again = true;
  2189. }
  2190. }
  2191. /* Special case UIMM4 format: CMP, MUL, AND, OR. */
  2192. else if (code == 1 && ssymval <= 15 && ssymval >= 0
  2193. /* Decodable bits and immediate type. */
  2194. && insn[0] == 0x75
  2195. /* Decodable bits. */
  2196. && (insn[1] & 0xc0) == 0x00)
  2197. {
  2198. static const int newop[4] = { 1, 3, 4, 5 };
  2199. insn[0] = 0x60 | newop[insn[1] >> 4];
  2200. /* The register number doesn't move. */
  2201. /* We can't relax this new opcode. */
  2202. irel->r_addend = 0;
  2203. move_reloc (irel, srel, -1);
  2204. SNIP (2, 1, R_RX_RH_UIMM4p8);
  2205. *again = true;
  2206. }
  2207. /* Special case UIMM4 format: ADD -> ADD/SUB. */
  2208. else if (code == 1 && ssymval <= 15 && ssymval >= -15
  2209. /* Decodable bits and immediate type. */
  2210. && insn[0] == 0x71
  2211. /* Same register for source and destination. */
  2212. && ((insn[1] >> 4) == (insn[1] & 0x0f)))
  2213. {
  2214. int newrel;
  2215. /* Note that we can't turn "add $0,Rs" into a NOP
  2216. because the flags need to be set right. */
  2217. if (ssymval < 0)
  2218. {
  2219. insn[0] = 0x60; /* Subtract. */
  2220. newrel = R_RX_RH_UNEG4p8;
  2221. }
  2222. else
  2223. {
  2224. insn[0] = 0x62; /* Add. */
  2225. newrel = R_RX_RH_UIMM4p8;
  2226. }
  2227. /* The register number is in the right place. */
  2228. /* We can't relax this new opcode. */
  2229. irel->r_addend = 0;
  2230. move_reloc (irel, srel, -1);
  2231. SNIP (2, 1, newrel);
  2232. *again = true;
  2233. }
  2234. }
  2235. /* These are either matched with a DSP6 (2-byte base) or an id24
  2236. (3-byte base). */
  2237. if (irel->r_addend & RX_RELAXA_IMM12)
  2238. {
  2239. int dspcode, offset = 0;
  2240. long ssymval;
  2241. GET_RELOC;
  2242. if ((insn[0] & 0xfc) == 0xfc)
  2243. dspcode = 1; /* Just something with one byte operand. */
  2244. else
  2245. dspcode = insn[0] & 3;
  2246. switch (dspcode)
  2247. {
  2248. case 0: offset = 2; break;
  2249. case 1: offset = 3; break;
  2250. case 2: offset = 4; break;
  2251. case 3: offset = 2; break;
  2252. }
  2253. /* These relocations sign-extend, so we must do signed compares. */
  2254. ssymval = (long) symval;
  2255. code = (insn[1] >> 2) & 3;
  2256. if (code == 0 && ssymval <= 8388607 && ssymval >= -8388608)
  2257. {
  2258. unsigned int newrel = ELF32_R_TYPE (srel->r_info);
  2259. insn[1] &= 0xf3;
  2260. insn[1] |= 0x0c;
  2261. newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
  2262. if (newrel != ELF32_R_TYPE (srel->r_info))
  2263. {
  2264. SNIP (offset, 1, newrel);
  2265. *again = true;
  2266. }
  2267. }
  2268. else if (code == 3 && ssymval <= 32767 && ssymval >= -32768)
  2269. {
  2270. unsigned int newrel = ELF32_R_TYPE (srel->r_info);
  2271. insn[1] &= 0xf3;
  2272. insn[1] |= 0x08;
  2273. newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
  2274. if (newrel != ELF32_R_TYPE (srel->r_info))
  2275. {
  2276. SNIP (offset, 1, newrel);
  2277. *again = true;
  2278. }
  2279. }
  2280. /* Special case UIMM8 format: MOV #uimm8,Rdst. */
  2281. else if (code == 2 && ssymval <= 255 && ssymval >= 16
  2282. /* Decodable bits. */
  2283. && insn[0] == 0xfb
  2284. /* Decodable bits. */
  2285. && ((insn[1] & 0x03) == 0x02))
  2286. {
  2287. int newrel;
  2288. insn[0] = 0x75;
  2289. insn[1] = 0x40 | (insn[1] >> 4);
  2290. /* We can't relax this new opcode. */
  2291. irel->r_addend = 0;
  2292. if (STACK_REL_P (ELF32_R_TYPE (srel->r_info)))
  2293. newrel = R_RX_ABS8U;
  2294. else
  2295. newrel = R_RX_DIR8U;
  2296. SNIP (2, 1, newrel);
  2297. *again = true;
  2298. }
  2299. else if (code == 2 && ssymval <= 127 && ssymval >= -128)
  2300. {
  2301. unsigned int newrel = ELF32_R_TYPE(srel->r_info);
  2302. insn[1] &= 0xf3;
  2303. insn[1] |= 0x04;
  2304. newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
  2305. if (newrel != ELF32_R_TYPE(srel->r_info))
  2306. {
  2307. SNIP (offset, 1, newrel);
  2308. *again = true;
  2309. }
  2310. }
  2311. /* Special case UIMM4 format: MOV #uimm4,Rdst. */
  2312. else if (code == 1 && ssymval <= 15 && ssymval >= 0
  2313. /* Decodable bits. */
  2314. && insn[0] == 0xfb
  2315. /* Decodable bits. */
  2316. && ((insn[1] & 0x03) == 0x02))
  2317. {
  2318. insn[0] = 0x66;
  2319. insn[1] = insn[1] >> 4;
  2320. /* We can't relax this new opcode. */
  2321. irel->r_addend = 0;
  2322. move_reloc (irel, srel, -1);
  2323. SNIP (2, 1, R_RX_RH_UIMM4p8);
  2324. *again = true;
  2325. }
  2326. }
  2327. if (irel->r_addend & RX_RELAXA_BRA)
  2328. {
  2329. unsigned int newrel = ELF32_R_TYPE (srel->r_info);
  2330. int max_pcrel3 = 4;
  2331. int alignment_glue = 0;
  2332. GET_RELOC;
  2333. /* Branches over alignment chunks are problematic, as
  2334. deleting bytes here makes the branch *further* away. We
2335. can be aggressive with branches within this alignment
  2336. block, but not branches outside it. */
  2337. if ((prev_alignment == NULL
  2338. || symval < (bfd_vma)(sec_start + prev_alignment->r_offset))
  2339. && (next_alignment == NULL
  2340. || symval > (bfd_vma)(sec_start + next_alignment->r_offset)))
  2341. alignment_glue = section_alignment_glue;
  2342. if (ELF32_R_TYPE(srel[1].r_info) == R_RX_RH_RELAX
  2343. && srel[1].r_addend & RX_RELAXA_BRA
  2344. && srel[1].r_offset < irel->r_offset + pcrel)
  2345. max_pcrel3 ++;
  2346. newrel = next_smaller_reloc (ELF32_R_TYPE (srel->r_info));
  2347. /* The values we compare PCREL with are not what you'd
  2348. expect; they're off by a little to compensate for (1)
  2349. where the reloc is relative to the insn, and (2) how much
  2350. the insn is going to change when we relax it. */
  2351. /* These we have to decode. */
  2352. switch (insn[0])
  2353. {
  2354. case 0x04: /* BRA pcdsp:24 */
  2355. if (-32768 + alignment_glue <= pcrel
  2356. && pcrel <= 32765 - alignment_glue)
  2357. {
  2358. insn[0] = 0x38;
  2359. SNIP (3, 1, newrel);
  2360. *again = true;
  2361. }
  2362. break;
  2363. case 0x38: /* BRA pcdsp:16 */
  2364. if (-128 + alignment_glue <= pcrel
  2365. && pcrel <= 127 - alignment_glue)
  2366. {
  2367. insn[0] = 0x2e;
  2368. SNIP (2, 1, newrel);
  2369. *again = true;
  2370. }
  2371. break;
  2372. case 0x2e: /* BRA pcdsp:8 */
  2373. /* Note that there's a risk here of shortening things so
  2374. much that we no longer fit this reloc; it *should*
  2375. only happen when you branch across a branch, and that
  2376. branch also devolves into BRA.S. "Real" code should
  2377. be OK. */
  2378. if (max_pcrel3 + alignment_glue <= pcrel
  2379. && pcrel <= 10 - alignment_glue
  2380. && allow_pcrel3)
  2381. {
  2382. insn[0] = 0x08;
  2383. SNIP (1, 1, newrel);
  2384. move_reloc (irel, srel, -1);
  2385. *again = true;
  2386. }
  2387. break;
  2388. case 0x05: /* BSR pcdsp:24 */
  2389. if (-32768 + alignment_glue <= pcrel
  2390. && pcrel <= 32765 - alignment_glue)
  2391. {
  2392. insn[0] = 0x39;
  2393. SNIP (1, 1, newrel);
  2394. *again = true;
  2395. }
  2396. break;
  2397. case 0x3a: /* BEQ.W pcdsp:16 */
  2398. case 0x3b: /* BNE.W pcdsp:16 */
  2399. if (-128 + alignment_glue <= pcrel
  2400. && pcrel <= 127 - alignment_glue)
  2401. {
  2402. insn[0] = 0x20 | (insn[0] & 1);
  2403. SNIP (1, 1, newrel);
  2404. *again = true;
  2405. }
  2406. break;
  2407. case 0x20: /* BEQ.B pcdsp:8 */
  2408. case 0x21: /* BNE.B pcdsp:8 */
  2409. if (max_pcrel3 + alignment_glue <= pcrel
  2410. && pcrel - alignment_glue <= 10
  2411. && allow_pcrel3)
  2412. {
  2413. insn[0] = 0x10 | ((insn[0] & 1) << 3);
  2414. SNIP (1, 1, newrel);
  2415. move_reloc (irel, srel, -1);
  2416. *again = true;
  2417. }
  2418. break;
  2419. case 0x16: /* synthetic BNE dsp24 */
  2420. case 0x1e: /* synthetic BEQ dsp24 */
  2421. if (-32767 + alignment_glue <= pcrel
  2422. && pcrel <= 32766 - alignment_glue
  2423. && insn[1] == 0x04)
  2424. {
  2425. if (insn[0] == 0x16)
  2426. insn[0] = 0x3b;
  2427. else
  2428. insn[0] = 0x3a;
2429. /* We snip out the bytes at the end, otherwise the reloc
2430. would also get moved, and by too much. */
  2431. SNIP (3, 2, newrel);
  2432. move_reloc (irel, srel, -1);
  2433. *again = true;
  2434. }
  2435. break;
  2436. }
  2437. /* Special case - synthetic conditional branches, pcrel24.
  2438. Note that EQ and NE have been handled above. */
  2439. if ((insn[0] & 0xf0) == 0x20
  2440. && insn[1] == 0x06
  2441. && insn[2] == 0x04
  2442. && srel->r_offset != irel->r_offset + 1
  2443. && -32767 + alignment_glue <= pcrel
  2444. && pcrel <= 32766 - alignment_glue)
  2445. {
  2446. insn[1] = 0x05;
  2447. insn[2] = 0x38;
  2448. SNIP (5, 1, newrel);
  2449. *again = true;
  2450. }
  2451. /* Special case - synthetic conditional branches, pcrel16 */
  2452. if ((insn[0] & 0xf0) == 0x20
  2453. && insn[1] == 0x05
  2454. && insn[2] == 0x38
  2455. && srel->r_offset != irel->r_offset + 1
  2456. && -127 + alignment_glue <= pcrel
  2457. && pcrel <= 126 - alignment_glue)
  2458. {
  2459. int cond = (insn[0] & 0x0f) ^ 0x01;
  2460. insn[0] = 0x20 | cond;
  2461. /* By moving the reloc first, we avoid having
  2462. delete_bytes move it also. */
  2463. move_reloc (irel, srel, -2);
  2464. SNIP (2, 3, newrel);
  2465. *again = true;
  2466. }
  2467. }
  2468. BFD_ASSERT (nrelocs == 0);
  2469. /* Special case - check MOV.bwl #IMM, dsp[reg] and see if we can
  2470. use MOV.bwl #uimm:8, dsp:5[r7] format. This is tricky
  2471. because it may have one or two relocations. */
  2472. if ((insn[0] & 0xfc) == 0xf8
  2473. && (insn[1] & 0x80) == 0x00
  2474. && (insn[0] & 0x03) != 0x03)
  2475. {
  2476. int dcode, icode, reg, ioff, dscale, ilen;
  2477. bfd_vma disp_val = 0;
  2478. long imm_val = 0;
  2479. Elf_Internal_Rela * disp_rel = 0;
  2480. Elf_Internal_Rela * imm_rel = 0;
  2481. /* Reset this. */
  2482. srel = irel;
  2483. dcode = insn[0] & 0x03;
  2484. icode = (insn[1] >> 2) & 0x03;
  2485. reg = (insn[1] >> 4) & 0x0f;
  2486. ioff = dcode == 1 ? 3 : dcode == 2 ? 4 : 2;
2487. /* Figure out what the displacement is. */
  2488. if (dcode == 1 || dcode == 2)
  2489. {
  2490. /* There's a displacement. See if there's a reloc for it. */
  2491. if (srel[1].r_offset == irel->r_offset + 2)
  2492. {
  2493. GET_RELOC;
  2494. disp_val = symval;
  2495. disp_rel = srel;
  2496. }
  2497. else
  2498. {
  2499. if (dcode == 1)
  2500. disp_val = insn[2];
  2501. else
  2502. {
  2503. #if RX_OPCODE_BIG_ENDIAN
  2504. disp_val = insn[2] * 256 + insn[3];
  2505. #else
  2506. disp_val = insn[2] + insn[3] * 256;
  2507. #endif
  2508. }
  2509. switch (insn[1] & 3)
  2510. {
  2511. case 1:
  2512. disp_val *= 2;
  2513. scale = 2;
  2514. break;
  2515. case 2:
  2516. disp_val *= 4;
  2517. scale = 4;
  2518. break;
  2519. }
  2520. }
  2521. }
  2522. dscale = scale;
  2523. /* Figure out what the immediate is. */
  2524. if (srel[1].r_offset == irel->r_offset + ioff)
  2525. {
  2526. GET_RELOC;
  2527. imm_val = (long) symval;
  2528. imm_rel = srel;
  2529. }
  2530. else
  2531. {
  2532. unsigned char * ip = insn + ioff;
  2533. switch (icode)
  2534. {
  2535. case 1:
  2536. /* For byte writes, we don't sign extend. Makes the math easier later. */
  2537. if (scale == 1)
  2538. imm_val = ip[0];
  2539. else
  2540. imm_val = (char) ip[0];
  2541. break;
  2542. case 2:
  2543. #if RX_OPCODE_BIG_ENDIAN
  2544. imm_val = ((char) ip[0] << 8) | ip[1];
  2545. #else
  2546. imm_val = ((char) ip[1] << 8) | ip[0];
  2547. #endif
  2548. break;
  2549. case 3:
  2550. #if RX_OPCODE_BIG_ENDIAN
  2551. imm_val = ((char) ip[0] << 16) | (ip[1] << 8) | ip[2];
  2552. #else
  2553. imm_val = ((char) ip[2] << 16) | (ip[1] << 8) | ip[0];
  2554. #endif
  2555. break;
  2556. case 0:
  2557. #if RX_OPCODE_BIG_ENDIAN
  2558. imm_val = ((unsigned) ip[0] << 24) | (ip[1] << 16) | (ip[2] << 8) | ip[3];
  2559. #else
  2560. imm_val = ((unsigned) ip[3] << 24) | (ip[2] << 16) | (ip[1] << 8) | ip[0];
  2561. #endif
  2562. break;
  2563. }
  2564. }
  2565. ilen = 2;
  2566. switch (dcode)
  2567. {
  2568. case 1:
  2569. ilen += 1;
  2570. break;
  2571. case 2:
  2572. ilen += 2;
  2573. break;
  2574. }
  2575. switch (icode)
  2576. {
  2577. case 1:
  2578. ilen += 1;
  2579. break;
  2580. case 2:
  2581. ilen += 2;
  2582. break;
  2583. case 3:
  2584. ilen += 3;
  2585. break;
  2586. case 4:
  2587. ilen += 4;
  2588. break;
  2589. }
  2590. /* The shortcut happens when the immediate is 0..255,
  2591. register r0 to r7, and displacement (scaled) 0..31. */
  2592. if (0 <= imm_val && imm_val <= 255
  2593. && 0 <= reg && reg <= 7
  2594. && disp_val / dscale <= 31)
  2595. {
  2596. insn[0] = 0x3c | (insn[1] & 0x03);
  2597. insn[1] = (((disp_val / dscale) << 3) & 0x80) | (reg << 4) | ((disp_val/dscale) & 0x0f);
  2598. insn[2] = imm_val;
  2599. if (disp_rel)
  2600. {
  2601. int newrel = R_RX_NONE;
  2602. switch (dscale)
  2603. {
  2604. case 1:
  2605. newrel = R_RX_RH_ABS5p8B;
  2606. break;
  2607. case 2:
  2608. newrel = R_RX_RH_ABS5p8W;
  2609. break;
  2610. case 4:
  2611. newrel = R_RX_RH_ABS5p8L;
  2612. break;
  2613. }
  2614. disp_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (disp_rel->r_info), newrel);
  2615. move_reloc (irel, disp_rel, -1);
  2616. }
  2617. if (imm_rel)
  2618. {
  2619. imm_rel->r_info = ELF32_R_INFO (ELF32_R_SYM (imm_rel->r_info), R_RX_DIR8U);
  2620. move_reloc (disp_rel ? disp_rel : irel,
  2621. imm_rel,
  2622. irel->r_offset - imm_rel->r_offset + 2);
  2623. }
  2624. SNIPNR (3, ilen - 3);
  2625. *again = true;
  2626. /* We can't relax this new opcode. */
  2627. irel->r_addend = 0;
  2628. }
  2629. }
  2630. }
  2631. /* We can't reliably relax branches to DIR3U_PCREL unless we know
  2632. whatever they're branching over won't shrink any more. If we're
  2633. basically done here, do one more pass just for branches - but
  2634. don't request a pass after that one! */
  2635. if (!*again && !allow_pcrel3)
  2636. {
  2637. bool ignored;
  2638. elf32_rx_relax_section (abfd, sec, link_info, &ignored, true);
  2639. }
  2640. return true;
  2641. error_return:
  2642. free (free_contents);
  2643. if (shndx_buf != NULL)
  2644. {
  2645. shndx_hdr->contents = NULL;
  2646. free (shndx_buf);
  2647. }
  2648. free (free_intsyms);
  2649. return false;
  2650. }
  2651. static bool
  2652. elf32_rx_relax_section_wrapper (bfd *abfd,
  2653. asection *sec,
  2654. struct bfd_link_info *link_info,
  2655. bool *again)
  2656. {
  2657. return elf32_rx_relax_section (abfd, sec, link_info, again, false);
  2658. }
  2659. /* Function to set the ELF flag bits. */
  2660. static bool
  2661. rx_elf_set_private_flags (bfd * abfd, flagword flags)
  2662. {
  2663. elf_elfheader (abfd)->e_flags = flags;
  2664. elf_flags_init (abfd) = true;
  2665. return true;
  2666. }
  2667. static bool no_warn_mismatch = false;
  2668. static bool ignore_lma = true;
  2669. void bfd_elf32_rx_set_target_flags (bool, bool);
  2670. void
  2671. bfd_elf32_rx_set_target_flags (bool user_no_warn_mismatch,
  2672. bool user_ignore_lma)
  2673. {
  2674. no_warn_mismatch = user_no_warn_mismatch;
  2675. ignore_lma = user_ignore_lma;
  2676. }
  2677. /* Converts FLAGS into a descriptive string.
2678. Returns a pointer to the caller-supplied BUF. */
  2679. static const char *
  2680. describe_flags (flagword flags, char *buf)
  2681. {
  2682. buf[0] = 0;
  2683. if (flags & E_FLAG_RX_64BIT_DOUBLES)
  2684. strcat (buf, "64-bit doubles");
  2685. else
  2686. strcat (buf, "32-bit doubles");
  2687. if (flags & E_FLAG_RX_DSP)
  2688. strcat (buf, ", dsp");
  2689. else
  2690. strcat (buf, ", no dsp");
  2691. if (flags & E_FLAG_RX_PID)
  2692. strcat (buf, ", pid");
  2693. else
  2694. strcat (buf, ", no pid");
  2695. if (flags & E_FLAG_RX_ABI)
  2696. strcat (buf, ", RX ABI");
  2697. else
  2698. strcat (buf, ", GCC ABI");
  2699. if (flags & E_FLAG_RX_SINSNS_SET)
  2700. strcat (buf, flags & E_FLAG_RX_SINSNS_YES ? ", uses String instructions" : ", bans String instructions");
  2701. return buf;
  2702. }
  2703. /* Merge backend specific data from an object file to the output
  2704. object file when linking. */
  2705. static bool
  2706. rx_elf_merge_private_bfd_data (bfd * ibfd, struct bfd_link_info *info)
  2707. {
  2708. bfd *obfd = info->output_bfd;
  2709. flagword old_flags;
  2710. flagword new_flags;
  2711. bool error = false;
  2712. new_flags = elf_elfheader (ibfd)->e_flags;
  2713. old_flags = elf_elfheader (obfd)->e_flags;
  2714. if (!elf_flags_init (obfd))
  2715. {
  2716. /* First call, no flags set. */
  2717. elf_flags_init (obfd) = true;
  2718. elf_elfheader (obfd)->e_flags = new_flags;
  2719. }
  2720. else if (old_flags != new_flags)
  2721. {
  2722. flagword known_flags;
  2723. if (old_flags & E_FLAG_RX_SINSNS_SET)
  2724. {
  2725. if ((new_flags & E_FLAG_RX_SINSNS_SET) == 0)
  2726. {
  2727. new_flags &= ~ E_FLAG_RX_SINSNS_MASK;
  2728. new_flags |= (old_flags & E_FLAG_RX_SINSNS_MASK);
  2729. }
  2730. }
  2731. else if (new_flags & E_FLAG_RX_SINSNS_SET)
  2732. {
  2733. old_flags &= ~ E_FLAG_RX_SINSNS_MASK;
  2734. old_flags |= (new_flags & E_FLAG_RX_SINSNS_MASK);
  2735. }
  2736. known_flags = E_FLAG_RX_ABI | E_FLAG_RX_64BIT_DOUBLES
  2737. | E_FLAG_RX_DSP | E_FLAG_RX_PID | E_FLAG_RX_SINSNS_MASK;
  2738. if ((old_flags ^ new_flags) & known_flags)
  2739. {
  2740. /* Only complain if flag bits we care about do not match.
  2741. Other bits may be set, since older binaries did use some
  2742. deprecated flags. */
  2743. if (no_warn_mismatch)
  2744. {
  2745. elf_elfheader (obfd)->e_flags = (new_flags | old_flags) & known_flags;
  2746. }
  2747. else
  2748. {
  2749. char buf[128];
  2750. _bfd_error_handler (_("there is a conflict merging the"
  2751. " ELF header flags from %pB"),
  2752. ibfd);
  2753. _bfd_error_handler (_(" the input file's flags: %s"),
  2754. describe_flags (new_flags, buf));
  2755. _bfd_error_handler (_(" the output file's flags: %s"),
  2756. describe_flags (old_flags, buf));
  2757. error = true;
  2758. }
  2759. }
  2760. else
  2761. elf_elfheader (obfd)->e_flags = new_flags & known_flags;
  2762. }
  2763. if (error)
  2764. bfd_set_error (bfd_error_bad_value);
  2765. return !error;
  2766. }
  2767. static bool
  2768. rx_elf_print_private_bfd_data (bfd * abfd, void * ptr)
  2769. {
  2770. FILE * file = (FILE *) ptr;
  2771. flagword flags;
  2772. char buf[128];
  2773. BFD_ASSERT (abfd != NULL && ptr != NULL);
  2774. /* Print normal ELF private data. */
  2775. _bfd_elf_print_private_bfd_data (abfd, ptr);
  2776. flags = elf_elfheader (abfd)->e_flags;
  2777. fprintf (file, _("private flags = 0x%lx:"), (long) flags);
  2778. fprintf (file, "%s", describe_flags (flags, buf));
  2779. return true;
  2780. }
  2781. /* Return the MACH for an e_flags value. */
  2782. static int
  2783. elf32_rx_machine (bfd * abfd ATTRIBUTE_UNUSED)
  2784. {
  2785. #if 0 /* FIXME: EF_RX_CPU_MASK collides with E_FLAG_RX_...
  2786. Need to sort out how these flag bits are used.
  2787. For now we assume that the flags are OK. */
  2788. if ((elf_elfheader (abfd)->e_flags & EF_RX_CPU_MASK) == EF_RX_CPU_RX)
  2789. #endif
  2790. if ((elf_elfheader (abfd)->e_flags & E_FLAG_RX_V2))
  2791. return bfd_mach_rx_v2;
  2792. else if ((elf_elfheader (abfd)->e_flags & E_FLAG_RX_V3))
  2793. return bfd_mach_rx_v3;
  2794. else
  2795. return bfd_mach_rx;
  2796. return 0;
  2797. }
  2798. static bool
  2799. rx_elf_object_p (bfd * abfd)
  2800. {
  2801. int i;
  2802. unsigned int u;
  2803. Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
  2804. Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
  2805. int nphdrs = ehdr->e_phnum;
  2806. sec_ptr bsec;
  2807. static int saw_be = false;
  2808. bfd_vma end_phdroff;
  2809. /* We never want to automatically choose the non-swapping big-endian
  2810. target. The user can only get that explicitly, such as with -I
  2811. and objcopy. */
  2812. if (abfd->xvec == &rx_elf32_be_ns_vec
  2813. && abfd->target_defaulted)
  2814. return false;
  2815. /* BFD->target_defaulted is not set to TRUE when a target is chosen
  2816. as a fallback, so we check for "scanning" to know when to stop
  2817. using the non-swapping target. */
  2818. if (abfd->xvec == &rx_elf32_be_ns_vec
  2819. && saw_be)
  2820. return false;
  2821. if (abfd->xvec == &rx_elf32_be_vec)
  2822. saw_be = true;
  2823. bfd_default_set_arch_mach (abfd, bfd_arch_rx,
  2824. elf32_rx_machine (abfd));
  2825. /* For each PHDR in the object, we must find some section that
  2826. corresponds (based on matching file offsets) and use its VMA
  2827. information to reconstruct the p_vaddr field we clobbered when we
  2828. wrote it out. */
  2829. /* If PT_LOAD headers include the ELF file header or program headers
  2830. then the PT_LOAD header does not start with some section contents.
  2831. Making adjustments based on the difference between sh_offset and
  2832. p_offset is nonsense in such cases. Exclude them. Note that
  2833. since standard linker scripts for RX do not use SIZEOF_HEADERS,
  2834. the linker won't normally create PT_LOAD segments covering the
  2835. headers so this is mainly for passing the ld testsuite.
  2836. FIXME. Why are we looking at non-PT_LOAD headers here? */
  2837. end_phdroff = ehdr->e_ehsize;
  2838. if (ehdr->e_phoff != 0)
  2839. end_phdroff = ehdr->e_phoff + nphdrs * ehdr->e_phentsize;
  2840. for (i=0; i<nphdrs; i++)
  2841. {
  2842. for (u=0; u<elf_tdata(abfd)->num_elf_sections; u++)
  2843. {
  2844. Elf_Internal_Shdr *sec = elf_tdata(abfd)->elf_sect_ptr[u];
  2845. if (phdr[i].p_filesz
  2846. && phdr[i].p_offset >= end_phdroff
  2847. && phdr[i].p_offset <= (bfd_vma) sec->sh_offset
  2848. && sec->sh_size > 0
  2849. && sec->sh_type != SHT_NOBITS
  2850. && (bfd_vma)sec->sh_offset <= phdr[i].p_offset + (phdr[i].p_filesz - 1))
  2851. {
  2852. /* Found one! The difference between the two addresses,
  2853. plus the difference between the two file offsets, is
  2854. enough information to reconstruct the lma. */
  2855. /* Example where they aren't:
  2856. PHDR[1] = lma fffc0100 offset 00002010 size 00000100
  2857. SEC[6] = vma 00000050 offset 00002050 size 00000040
  2858. The correct LMA for the section is fffc0140 + (2050-2010).
  2859. */
  2860. phdr[i].p_vaddr = sec->sh_addr + (sec->sh_offset - phdr[i].p_offset);
  2861. break;
  2862. }
  2863. }
  2864. /* We must update the bfd sections as well, so we don't stop
  2865. with one match. */
  2866. bsec = abfd->sections;
  2867. while (bsec)
  2868. {
  2869. if (phdr[i].p_filesz
  2870. && phdr[i].p_vaddr <= bsec->vma
  2871. && bsec->vma <= phdr[i].p_vaddr + (phdr[i].p_filesz - 1))
  2872. {
  2873. bsec->lma = phdr[i].p_paddr + (bsec->vma - phdr[i].p_vaddr);
  2874. }
  2875. bsec = bsec->next;
  2876. }
  2877. }
  2878. return true;
  2879. }
  2880. static bool
  2881. rx_linux_object_p (bfd * abfd)
  2882. {
  2883. bfd_default_set_arch_mach (abfd, bfd_arch_rx, elf32_rx_machine (abfd));
  2884. return true;
  2885. }

#ifdef DEBUG
void
rx_dump_symtab (bfd * abfd, void * internal_syms, void * external_syms)
{
  size_t locsymcount;
  Elf_Internal_Sym * isymbuf;
  Elf_Internal_Sym * isymend;
  Elf_Internal_Sym * isym;
  Elf_Internal_Shdr * symtab_hdr;
  char * st_info_str;
  char * st_info_stb_str;
  char * st_other_str;
  char * st_shndx_str;

  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
  locsymcount = symtab_hdr->sh_size / get_elf_backend_data (abfd)->s->sizeof_sym;

  if (!internal_syms)
    isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
                                    symtab_hdr->sh_info, 0,
                                    internal_syms, external_syms, NULL);
  else
    isymbuf = internal_syms;

  isymend = isymbuf + locsymcount;
  for (isym = isymbuf ; isym < isymend ; isym++)
    {
      switch (ELF_ST_TYPE (isym->st_info))
        {
        case STT_FUNC: st_info_str = "STT_FUNC"; break;
        case STT_SECTION: st_info_str = "STT_SECTION"; break;
        case STT_FILE: st_info_str = "STT_FILE"; break;
        case STT_OBJECT: st_info_str = "STT_OBJECT"; break;
        case STT_TLS: st_info_str = "STT_TLS"; break;
        default: st_info_str = "";
        }
      switch (ELF_ST_BIND (isym->st_info))
        {
        case STB_LOCAL: st_info_stb_str = "STB_LOCAL"; break;
        case STB_GLOBAL: st_info_stb_str = "STB_GLOBAL"; break;
        default: st_info_stb_str = "";
        }
      switch (ELF_ST_VISIBILITY (isym->st_other))
        {
        case STV_DEFAULT: st_other_str = "STV_DEFAULT"; break;
        case STV_INTERNAL: st_other_str = "STV_INTERNAL"; break;
        case STV_PROTECTED: st_other_str = "STV_PROTECTED"; break;
        default: st_other_str = "";
        }
      switch (isym->st_shndx)
        {
        case SHN_ABS: st_shndx_str = "SHN_ABS"; break;
        case SHN_COMMON: st_shndx_str = "SHN_COMMON"; break;
        case SHN_UNDEF: st_shndx_str = "SHN_UNDEF"; break;
        default: st_shndx_str = "";
        }

      printf ("isym = %p st_value = %lx st_size = %lx st_name = (%lu) %s "
              "st_info = (%d) %s %s st_other = (%d) %s st_shndx = (%d) %s\n",
              isym,
              (unsigned long) isym->st_value,
              (unsigned long) isym->st_size,
              isym->st_name,
              bfd_elf_string_from_elf_section (abfd, symtab_hdr->sh_link,
                                               isym->st_name),
              isym->st_info, st_info_str, st_info_stb_str,
              isym->st_other, st_other_str,
              isym->st_shndx, st_shndx_str);
    }
}

char *
rx_get_reloc (long reloc)
{
  if (0 <= reloc && reloc < R_RX_max)
    return rx_elf_howto_table[reloc].name;
  return "";
}
#endif /* DEBUG */

/* We must take care to keep the on-disk copy of any code sections
   that are fully linked word-swapped if the target is big endian, to
   match the Renesas tools.  */

/* The rule is: big endian objects that are final-link executables
   have code sections stored with 32-bit words swapped relative to
   what you'd get by default.  */
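
/* For illustration (an invented example, not taken from a real
   object): within each 4-byte, 4-aligned unit of a code section the
   file image holds the bytes in reverse order relative to the memory
   image, which is what the bfd_putb32 (bfd_getl32 (...)) pairs below
   implement.  If the linked image holds

     01 02 03 04

   at some word-aligned address, the big-endian final-link executable
   stores

     04 03 02 01

   at the corresponding file offset.  */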

static bool
rx_get_section_contents (bfd * abfd,
                         sec_ptr section,
                         void * location,
                         file_ptr offset,
                         bfd_size_type count)
{
  int exec = (abfd->flags & EXEC_P) ? 1 : 0;
  int s_code = (section->flags & SEC_CODE) ? 1 : 0;
  bool rv;

#ifdef DJDEBUG
  fprintf (stderr, "dj: get %ld %ld from %s %s e%d sc%d %08lx:%08lx\n",
           (long) offset, (long) count, section->name,
           bfd_big_endian (abfd) ? "be" : "le",
           exec, s_code, (long unsigned) section->filepos,
           (long unsigned) offset);
#endif

  if (exec && s_code && bfd_big_endian (abfd))
    {
      char * cloc = (char *) location;
      bfd_size_type cnt, end_cnt;

      rv = true;

      /* Fetch and swap unaligned bytes at the beginning.  */
      if (offset % 4)
        {
          char buf[4];

          rv = _bfd_generic_get_section_contents (abfd, section, buf,
                                                  (offset & -4), 4);
          if (!rv)
            return false;

          bfd_putb32 (bfd_getl32 (buf), buf);

          cnt = 4 - (offset % 4);
          if (cnt > count)
            cnt = count;

          memcpy (location, buf + (offset % 4), cnt);

          count -= cnt;
          offset += cnt;
          /* Advance by the number of bytes just copied, not by the
             remaining count.  */
          cloc += cnt;
        }

      end_cnt = count % 4;

      /* Fetch and swap the middle bytes.  */
      if (count >= 4)
        {
          rv = _bfd_generic_get_section_contents (abfd, section, cloc, offset,
                                                  count - end_cnt);
          if (!rv)
            return false;

          for (cnt = count; cnt >= 4; cnt -= 4, cloc += 4)
            bfd_putb32 (bfd_getl32 (cloc), cloc);
        }

      /* Fetch and swap the end bytes.  */
      if (end_cnt > 0)
        {
          char buf[4];

          /* Fetch the end bytes.  */
          rv = _bfd_generic_get_section_contents (abfd, section, buf,
                                                  offset + count - end_cnt, 4);
          if (!rv)
            return false;

          bfd_putb32 (bfd_getl32 (buf), buf);
          memcpy (cloc, buf, end_cnt);
        }
    }
  else
    rv = _bfd_generic_get_section_contents (abfd, section, location, offset, count);

  return rv;
}
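
/* A usage note, not from the original sources: because the swap is
   undone here as the bytes are read, generic consumers of
   bfd_get_section_contents (for example objdump's disassembler) see
   the code in normal instruction order even though the on-disk image
   of a big-endian final-link executable is word-swapped.  */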

#ifdef DJDEBUG
static bool
rx2_set_section_contents (bfd * abfd,
                          sec_ptr section,
                          const void * location,
                          file_ptr offset,
                          bfd_size_type count)
{
  bfd_size_type i;

  fprintf (stderr, " set sec %s %08x loc %p offset %#x count %#x\n",
           section->name, (unsigned) section->vma, location, (int) offset, (int) count);
  for (i = 0; i < count; i++)
    {
      if (i % 16 == 0 && i > 0)
        fprintf (stderr, "\n");
      if (i % 16 && i % 4 == 0)
        fprintf (stderr, " ");
      if (i % 16 == 0)
        fprintf (stderr, " %08x:", (int) (section->vma + offset + i));
      fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
    }
  fprintf (stderr, "\n");

  return _bfd_elf_set_section_contents (abfd, section, location, offset, count);
}
#define _bfd_elf_set_section_contents rx2_set_section_contents
#endif

static bool
rx_set_section_contents (bfd * abfd,
                         sec_ptr section,
                         const void * location,
                         file_ptr offset,
                         bfd_size_type count)
{
  bool exec = (abfd->flags & EXEC_P) != 0;
  bool s_code = (section->flags & SEC_CODE) != 0;
  bool rv;
  char * swapped_data = NULL;
  bfd_size_type i;
  bfd_vma caddr = section->vma + offset;
  file_ptr faddr = 0;
  bfd_size_type scount;

#ifdef DJDEBUG
  fprintf (stderr, "\ndj: set %ld %ld to %s %s e%d sc%d\n",
           (long) offset, (long) count, section->name,
           bfd_big_endian (abfd) ? "be" : "le",
           exec, s_code);
  for (i = 0; i < count; i++)
    {
      int a = section->vma + offset + i;
      if (a % 16 == 0 && a > 0)
        fprintf (stderr, "\n");
      if (a % 16 && a % 4 == 0)
        fprintf (stderr, " ");
      if (a % 16 == 0 || i == 0)
        fprintf (stderr, " %08x:", (int) (section->vma + offset + i));
      fprintf (stderr, " %02x", ((unsigned char *) location)[i]);
    }
  fprintf (stderr, "\n");
#endif

  if (! exec || ! s_code || ! bfd_big_endian (abfd))
    return _bfd_elf_set_section_contents (abfd, section, location, offset, count);

  /* Write out any unaligned bytes at the start one at a time; each
     lands at the byte-reversed position within its 4-byte unit.  */
  while (count > 0 && caddr > 0 && caddr % 4)
    {
      switch (caddr % 4)
        {
        case 0: faddr = offset + 3; break;
        case 1: faddr = offset + 1; break;
        case 2: faddr = offset - 1; break;
        case 3: faddr = offset - 3; break;
        }
      rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
      if (! rv)
        return rv;

      location = (bfd_byte *) location + 1;
      offset ++;
      count --;
      caddr ++;
    }

  /* Swap and write the aligned middle in whole 32-bit words.  */
  scount = (int) (count / 4) * 4;
  if (scount > 0)
    {
      char * cloc = (char *) location;

      swapped_data = (char *) bfd_alloc (abfd, count);
      if (swapped_data == NULL)
        return false;

      for (i = 0; i < count; i += 4)
        {
          bfd_vma v = bfd_getl32 (cloc + i);
          bfd_putb32 (v, swapped_data + i);
        }

      rv = _bfd_elf_set_section_contents (abfd, section, swapped_data, offset, scount);
      if (!rv)
        return rv;
    }

  count -= scount;
  location = (bfd_byte *) location + scount;
  offset += scount;

  /* Write out any trailing unaligned bytes the same way as the
     leading ones.  */
  if (count > 0)
    {
      caddr = section->vma + offset;
      while (count > 0)
        {
          switch (caddr % 4)
            {
            case 0: faddr = offset + 3; break;
            case 1: faddr = offset + 1; break;
            case 2: faddr = offset - 1; break;
            case 3: faddr = offset - 3; break;
            }
          rv = _bfd_elf_set_section_contents (abfd, section, location, faddr, 1);
          if (! rv)
            return rv;

          location = (bfd_byte *) location + 1;
          offset ++;
          count --;
          caddr ++;
        }
    }

  return true;
}
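
/* Worked example (illustrative only): suppose a single byte is
   written to memory address 0x102, i.e. caddr % 4 == 2, with file
   offset `offset'.  Within the 4-byte unit covering 0x100..0x103 the
   byte-reversed position of address 0x102 is the one address 0x101
   would normally occupy, so the byte is written at offset - 1, which
   is the "case 2" entry in the switches above.  The general rule is
   faddr = offset + 3 - 2 * (caddr % 4).  */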

static bool
rx_final_link (bfd * abfd, struct bfd_link_info * info)
{
  asection * o;

  for (o = abfd->sections; o != NULL; o = o->next)
    {
#ifdef DJDEBUG
      fprintf (stderr, "sec %s fl %x vma %lx lma %lx size %lx raw %lx\n",
               o->name, o->flags, o->vma, o->lma, o->size, o->rawsize);
#endif
      /* Round big-endian code sections up to a multiple of 4 bytes so
         that whole 32-bit words can be swapped when the contents are
         written out.  */
      if (o->flags & SEC_CODE
          && bfd_big_endian (abfd)
          && o->size % 4)
        {
#ifdef DJDEBUG
          fprintf (stderr, "adjusting...\n");
#endif
          o->size += 4 - (o->size % 4);
        }
    }

  return bfd_elf_final_link (abfd, info);
}

static bool
elf32_rx_modify_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data * bed;
  struct elf_obj_tdata * tdata;
  Elf_Internal_Phdr * phdr;
  unsigned int count;
  unsigned int i;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;

  if (ignore_lma)
    for (i = count; i-- != 0;)
      if (phdr[i].p_type == PT_LOAD)
        {
          /* The Renesas tools expect p_paddr to be zero.  However,
             there is no other way to store the writable data in ROM for
             startup initialization.  So, we let the linker *think*
             we're using paddr and vaddr the "usual" way, but at the
             last minute we move the paddr into the vaddr (which is what
             the simulator uses) and zero out paddr.  Note that this
             does not affect the section headers, just the program
             headers.  We hope.  */
          phdr[i].p_vaddr = phdr[i].p_paddr;
#if 0
          /* If we zero out p_paddr, then the LMA in the section table
             becomes wrong.  */
          phdr[i].p_paddr = 0;
#endif
        }

  return _bfd_elf_modify_headers (abfd, info);
}
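
/* Illustrative example (made-up addresses): with ignore_lma in
   effect, a PT_LOAD segment that the linker laid out as

     p_vaddr 0x00000800  p_paddr 0xfff80000

   is emitted with

     p_vaddr 0xfff80000  p_paddr 0xfff80000

   so tools that look only at p_vaddr (such as the simulator mentioned
   above) load the data at its ROM address.  The section headers keep
   their original addresses.  */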

/* The default literal sections should always be marked as "code" (i.e.,
   SHF_EXECINSTR).  This is particularly important for big-endian mode
   when we do not want their contents byte reversed.  */
static const struct bfd_elf_special_section elf32_rx_special_sections[] =
{
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC + SHF_EXECINSTR },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC + SHF_EXECINSTR },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC + SHF_EXECINSTR },
  { NULL,                                0, 0,                 0 }
};

typedef struct {
  bfd *abfd;
  struct bfd_link_info *info;
  bfd_vma table_start;
  int table_size;
  bfd_vma *table_handlers;
  bfd_vma table_default_handler;
  struct bfd_link_hash_entry **table_entries;
  struct bfd_link_hash_entry *table_default_entry;
  FILE *mapfile;
} RX_Table_Info;
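
/* Summary of the symbol convention the table code below relies on
   (derived from the lookups in rx_table_find and rx_table_map, shown
   here with a hypothetical table name "vect" for illustration):

     $tablestart$vect           start of the table (its section is kept)
     $tableend$vect             end of the table, same input section
     $tableentry$<N>$vect       handler for 4-byte entry N
     $tableentry$default$vect   optional fallback handler

   The number of entries is (end - start) / 4.  */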

static bool
rx_table_find (struct bfd_hash_entry *vent, void *vinfo)
{
  RX_Table_Info *info = (RX_Table_Info *) vinfo;
  struct bfd_link_hash_entry *ent = (struct bfd_link_hash_entry *) vent;
  const char *name; /* of the symbol we've found */
  asection *sec;
  struct bfd *abfd;
  int idx;
  const char *tname; /* name of the table */
  bfd_vma start_addr, end_addr;
  char *buf;
  struct bfd_link_hash_entry * h;

  /* We're looking for globally defined symbols of the form
     $tablestart$<NAME>.  */
  if (ent->type != bfd_link_hash_defined
      && ent->type != bfd_link_hash_defweak)
    return true;

  name = ent->root.string;
  sec = ent->u.def.section;
  abfd = sec->owner;

  if (!startswith (name, "$tablestart$"))
    return true;

  sec->flags |= SEC_KEEP;

  tname = name + 12;
  start_addr = ent->u.def.value;

  /* At this point, we can't build the table but we can (and must)
     find all the related symbols and mark their sections as SEC_KEEP
     so we don't garbage collect them.  */

  buf = (char *) bfd_malloc (12 + 10 + strlen (tname));
  if (buf == NULL)
    return false;

  sprintf (buf, "$tableend$%s", tname);
  h = bfd_link_hash_lookup (info->info->hash, buf, false, false, true);
  if (!h || (h->type != bfd_link_hash_defined
             && h->type != bfd_link_hash_defweak))
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB:%pA: table %s missing corresponding %s"),
                          abfd, sec, name, buf);
      return true;
    }

  if (h->u.def.section != ent->u.def.section)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB:%pA: %s and %s must be in the same input section"),
                          h->u.def.section->owner, h->u.def.section,
                          name, buf);
      return true;
    }

  end_addr = h->u.def.value;

  sprintf (buf, "$tableentry$default$%s", tname);
  h = bfd_link_hash_lookup (info->info->hash, buf, false, false, true);
  if (h && (h->type == bfd_link_hash_defined
            || h->type == bfd_link_hash_defweak))
    {
      h->u.def.section->flags |= SEC_KEEP;
    }

  for (idx = 0; idx < (int) (end_addr - start_addr) / 4; idx ++)
    {
      sprintf (buf, "$tableentry$%d$%s", idx, tname);
      h = bfd_link_hash_lookup (info->info->hash, buf, false, false, true);
      if (h && (h->type == bfd_link_hash_defined
                || h->type == bfd_link_hash_defweak))
        {
          h->u.def.section->flags |= SEC_KEEP;
        }
    }

  /* Return TRUE to keep scanning, FALSE to end the traversal.  */
  return true;
}

/* We need to check for table entry symbols and build the tables, and
   we need to do it before the linker does garbage collection.  This
   function is called once per input object file.  */
static bool
rx_check_directives (bfd * abfd ATTRIBUTE_UNUSED,
                     struct bfd_link_info * info ATTRIBUTE_UNUSED)
{
  RX_Table_Info stuff;

  stuff.abfd = abfd;
  stuff.info = info;
  bfd_hash_traverse (&(info->hash->table), rx_table_find, &stuff);

  return true;
}

static bool
rx_table_map_2 (struct bfd_hash_entry *vent, void *vinfo)
{
  RX_Table_Info *info = (RX_Table_Info *) vinfo;
  struct bfd_link_hash_entry *ent = (struct bfd_link_hash_entry *) vent;
  int idx;
  const char *name;
  bfd_vma addr;

  /* See if the symbol ENT has an address listed in the table, and
     isn't a debug/special symbol.  If so, put it in the table.  */

  if (ent->type != bfd_link_hash_defined
      && ent->type != bfd_link_hash_defweak)
    return true;

  name = ent->root.string;

  if (name[0] == '$' || name[0] == '.' || name[0] < ' ')
    return true;

  addr = (ent->u.def.value
          + ent->u.def.section->output_section->vma
          + ent->u.def.section->output_offset);

  for (idx = 0; idx < info->table_size; idx ++)
    if (addr == info->table_handlers[idx])
      info->table_entries[idx] = ent;

  if (addr == info->table_default_handler)
    info->table_default_entry = ent;

  return true;
}

static bool
rx_table_map (struct bfd_hash_entry *vent, void *vinfo)
{
  RX_Table_Info *info = (RX_Table_Info *) vinfo;
  struct bfd_link_hash_entry *ent = (struct bfd_link_hash_entry *) vent;
  const char *name; /* of the symbol we've found */
  int idx;
  const char *tname; /* name of the table */
  bfd_vma start_addr, end_addr;
  char *buf;
  struct bfd_link_hash_entry * h;
  int need_elipses;

  /* We're looking for globally defined symbols of the form
     $tablestart$<NAME>.  */
  if (ent->type != bfd_link_hash_defined
      && ent->type != bfd_link_hash_defweak)
    return true;

  name = ent->root.string;

  if (!startswith (name, "$tablestart$"))
    return true;

  tname = name + 12;

  start_addr = (ent->u.def.value
                + ent->u.def.section->output_section->vma
                + ent->u.def.section->output_offset);

  buf = (char *) bfd_malloc (12 + 10 + strlen (tname));
  if (buf == NULL)
    return false;

  sprintf (buf, "$tableend$%s", tname);
  end_addr = get_symbol_value_maybe (buf, info->info);

  sprintf (buf, "$tableentry$default$%s", tname);
  h = bfd_link_hash_lookup (info->info->hash, buf, false, false, true);
  if (h)
    {
      info->table_default_handler = (h->u.def.value
                                     + h->u.def.section->output_section->vma
                                     + h->u.def.section->output_offset);
    }
  else
    /* Zero is a valid handler address!  */
    info->table_default_handler = (bfd_vma) (-1);
  info->table_default_entry = NULL;

  info->table_start = start_addr;
  info->table_size = (int) (end_addr - start_addr) / 4;
  info->table_handlers = (bfd_vma *)
    bfd_malloc (info->table_size * sizeof (bfd_vma));
  if (info->table_handlers == NULL)
    {
      free (buf);
      return false;
    }
  info->table_entries = (struct bfd_link_hash_entry **)
    bfd_malloc (info->table_size * sizeof (struct bfd_link_hash_entry));
  if (info->table_entries == NULL)
    {
      free (info->table_handlers);
      free (buf);
      return false;
    }

  for (idx = 0; idx < (int) (end_addr - start_addr) / 4; idx ++)
    {
      sprintf (buf, "$tableentry$%d$%s", idx, tname);
      h = bfd_link_hash_lookup (info->info->hash, buf, false, false, true);
      if (h && (h->type == bfd_link_hash_defined
                || h->type == bfd_link_hash_defweak))
        {
          info->table_handlers[idx] = (h->u.def.value
                                       + h->u.def.section->output_section->vma
                                       + h->u.def.section->output_offset);
        }
      else
        info->table_handlers[idx] = info->table_default_handler;
      info->table_entries[idx] = NULL;
    }

  free (buf);

  bfd_hash_traverse (&(info->info->hash->table), rx_table_map_2, info);

  fprintf (info->mapfile, "\nRX Vector Table: %s has %d entries at 0x%08" BFD_VMA_FMT "x\n\n",
           tname, info->table_size, start_addr);

  if (info->table_default_entry)
    fprintf (info->mapfile, " default handler is: %s at 0x%08" BFD_VMA_FMT "x\n",
             info->table_default_entry->root.string,
             info->table_default_handler);
  else if (info->table_default_handler != (bfd_vma) (-1))
    fprintf (info->mapfile, " default handler is at 0x%08" BFD_VMA_FMT "x\n",
             info->table_default_handler);
  else
    fprintf (info->mapfile, " no default handler\n");

  need_elipses = 1;
  for (idx = 0; idx < info->table_size; idx ++)
    {
      if (info->table_handlers[idx] == info->table_default_handler)
        {
          if (need_elipses)
            fprintf (info->mapfile, " . . .\n");
          need_elipses = 0;
          continue;
        }
      need_elipses = 1;

      fprintf (info->mapfile, " 0x%08" BFD_VMA_FMT "x [%3d] ", start_addr + 4 * idx, idx);

      if (info->table_handlers[idx] == (bfd_vma) (-1))
        fprintf (info->mapfile, "(no handler found)\n");
      else if (info->table_handlers[idx] == info->table_default_handler)
        {
          if (info->table_default_entry)
            fprintf (info->mapfile, "(default)\n");
          else
            fprintf (info->mapfile, "(default)\n");
        }
      else if (info->table_entries[idx])
        {
          fprintf (info->mapfile, "0x%08" BFD_VMA_FMT "x %s\n",
                   info->table_handlers[idx], info->table_entries[idx]->root.string);
        }
      else
        {
          fprintf (info->mapfile, "0x%08" BFD_VMA_FMT "x ???\n",
                   info->table_handlers[idx]);
        }
    }
  if (need_elipses)
    fprintf (info->mapfile, " . . .\n");

  return true;
}
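
/* For reference, the map-file text written above looks roughly like
   this (table name, addresses and handler names are invented for
   illustration):

   RX Vector Table: vect has 16 entries at 0x00010000

    default handler is: _unexpected_interrupt at 0x00001200
    . . .
    0x00010010 [  4] 0x00001300 _timer_isr
    . . .
*/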

void
rx_additional_link_map_text (bfd *obfd, struct bfd_link_info *info, FILE *mapfile)
{
  /* We scan the symbol table looking for $tableentry$'s, and for
     each, try to deduce which handlers go with which entries.  */
  RX_Table_Info stuff;

  stuff.abfd = obfd;
  stuff.info = info;
  stuff.mapfile = mapfile;
  bfd_hash_traverse (&(info->hash->table), rx_table_map, &stuff);
}

#define ELF_ARCH         bfd_arch_rx
#define ELF_MACHINE_CODE EM_RX
#define ELF_MAXPAGESIZE  0x1000

#define TARGET_BIG_SYM     rx_elf32_be_vec
#define TARGET_BIG_NAME    "elf32-rx-be"

#define TARGET_LITTLE_SYM  rx_elf32_le_vec
#define TARGET_LITTLE_NAME "elf32-rx-le"

#define elf_info_to_howto_rel        NULL
#define elf_info_to_howto            rx_info_to_howto_rela
#define elf_backend_object_p         rx_elf_object_p
#define elf_backend_relocate_section rx_elf_relocate_section
#define elf_symbol_leading_char      ('_')
#define elf_backend_can_gc_sections  1
#define elf_backend_modify_headers   elf32_rx_modify_headers

#define bfd_elf32_bfd_reloc_type_lookup      rx_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup      rx_reloc_name_lookup
#define bfd_elf32_bfd_set_private_flags      rx_elf_set_private_flags
#define bfd_elf32_bfd_merge_private_bfd_data rx_elf_merge_private_bfd_data
#define bfd_elf32_bfd_print_private_bfd_data rx_elf_print_private_bfd_data
#define bfd_elf32_get_section_contents       rx_get_section_contents
#define bfd_elf32_set_section_contents       rx_set_section_contents
#define bfd_elf32_bfd_final_link             rx_final_link
#define bfd_elf32_bfd_relax_section          elf32_rx_relax_section_wrapper
#define elf_backend_special_sections         elf32_rx_special_sections
#define elf_backend_check_directives         rx_check_directives

#include "elf32-target.h"

/* We define a second big-endian target that doesn't have the custom
   section get/set hooks, for times when we want to preserve the
   pre-swapped .text sections (like objcopy).  */

#undef  TARGET_BIG_SYM
#define TARGET_BIG_SYM  rx_elf32_be_ns_vec
#undef  TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-rx-be-ns"
#undef  TARGET_LITTLE_SYM

#undef bfd_elf32_get_section_contents
#undef bfd_elf32_set_section_contents

#undef  elf32_bed
#define elf32_bed elf32_rx_be_ns_bed

#include "elf32-target.h"

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM  rx_elf32_linux_le_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-rx-linux"
#undef  TARGET_BIG_SYM
#undef  TARGET_BIG_NAME

#undef  elf_backend_object_p
#define elf_backend_object_p rx_linux_object_p
#undef  elf_symbol_leading_char

#undef  elf32_bed
#define elf32_bed elf32_rx_le_linux_bed

#include "elf32-target.h"