elf32-arm.c 608 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
14221142311424114251142611427114281142911430114311143211433114341143511436114371143811439114401144111442114431144411445114461144711448114491145011451114521145311454114551145611457114581145911460114611146211463114641146511466114671146811469114701147111472114731147411475114761147711478114791148011481114821148311484114851148611487114881148911490114911149211493114941149511496114971149811499115001150111502115031150411505115061150711508115091151011511115121151311514115151151611517115181151911520115211152211523115241152511526115271152811529115301153111532115331153411535115361153711538115391154011541115421154311544115451154611547115481154911550115511155211553115541155511556115571155811559115601156111562115631156411565115661156711568115691157011571115721157311574115751157611577115781157911580115811158211583115841158511586115871158811589115901159111592115931159411595115961159711598115991160011601116021160311604116051160611607116081160911610116111161211613116141161511616116171161811619116201162111622116231162411625116261162711628116291163011631116321163311634116351163611637116381163911640116411164211643116441164511646116471164811649116501165111652116531165411655116561165711658116591166011661116621166311664116651166611667116681166911670116711167211673116741167511676116771167811679116801168111682116831168411685116861168711688116891169011691116921169311694116951169611697116981169911700117011170211703117041170511706117071170811709117101171111712117131171411715117161171711718117191172011721117221172311724117251172611727117281172911730117311173211733117341173511736117371173811739117401174111742117431174411745117461174711748117491175011751117521175311754117551175611757117581175911760117611176211763117641176511766117671176811769117701177111772117731177411775117761177711778117791178011781117821178311784117851178611787117881178911790117911179211793117941179511796117971179811799118001180111802118031180411805118061180711808118091181011811118121181311814118151181611817118181181911820118211
18221182311824118251182611827118281182911830118311183211833118341183511836118371183811839118401184111842118431184411845118461184711848118491185011851118521185311854118551185611857118581185911860118611186211863118641186511866118671186811869118701187111872118731187411875118761187711878118791188011881118821188311884118851188611887118881188911890118911189211893118941189511896118971189811899119001190111902119031190411905119061190711908119091191011911119121191311914119151191611917119181191911920119211192211923119241192511926119271192811929119301193111932119331193411935119361193711938119391194011941119421194311944119451194611947119481194911950119511195211953119541195511956119571195811959119601196111962119631196411965119661196711968119691197011971119721197311974119751197611977119781197911980119811198211983119841198511986119871198811989119901199111992119931199411995119961199711998119991200012001120021200312004120051200612007120081200912010120111201212013120141201512016120171201812019120201202112022120231202412025120261202712028120291203012031120321203312034120351203612037120381203912040120411204212043120441204512046120471204812049120501205112052120531205412055120561205712058120591206012061120621206312064120651206612067120681206912070120711207212073120741207512076120771207812079120801208112082120831208412085120861208712088120891209012091120921209312094120951209612097120981209912100121011210212103121041210512106121071210812109121101211112112121131211412115121161211712118121191212012121121221212312124121251212612127121281212912130121311213212133121341213512136121371213812139121401214112142121431214412145121461214712148121491215012151121521215312154121551215612157121581215912160121611216212163121641216512166121671216812169121701217112172121731217412175121761217712178121791218012181121821218312184121851218612187121881218912190121911219212193121941219512196121971219812199122001220112202122031220412205122061220712208122091221012211122121221312214122151221612217122181221912220122211
22221222312224122251222612227122281222912230122311223212233122341223512236122371223812239122401224112242122431224412245122461224712248122491225012251122521225312254122551225612257122581225912260122611226212263122641226512266122671226812269122701227112272122731227412275122761227712278122791228012281122821228312284122851228612287122881228912290122911229212293122941229512296122971229812299123001230112302123031230412305123061230712308123091231012311123121231312314123151231612317123181231912320123211232212323123241232512326123271232812329123301233112332123331233412335123361233712338123391234012341123421234312344123451234612347123481234912350123511235212353123541235512356123571235812359123601236112362123631236412365123661236712368123691237012371123721237312374123751237612377123781237912380123811238212383123841238512386123871238812389123901239112392123931239412395123961239712398123991240012401124021240312404124051240612407124081240912410124111241212413124141241512416124171241812419124201242112422124231242412425124261242712428124291243012431124321243312434124351243612437124381243912440124411244212443124441244512446124471244812449124501245112452124531245412455124561245712458124591246012461124621246312464124651246612467124681246912470124711247212473124741247512476124771247812479124801248112482124831248412485124861248712488124891249012491124921249312494124951249612497124981249912500125011250212503125041250512506125071250812509125101251112512125131251412515125161251712518125191252012521125221252312524125251252612527125281252912530125311253212533125341253512536125371253812539125401254112542125431254412545125461254712548125491255012551125521255312554125551255612557125581255912560125611256212563125641256512566125671256812569125701257112572125731257412575125761257712578125791258012581125821258312584125851258612587125881258912590125911259212593125941259512596125971259812599126001260112602126031260412605126061260712608126091261012611126121261312614126151261612617126181261912620126211
26221262312624126251262612627126281262912630126311263212633126341263512636126371263812639126401264112642126431264412645126461264712648126491265012651126521265312654126551265612657126581265912660126611266212663126641266512666126671266812669126701267112672126731267412675126761267712678126791268012681126821268312684126851268612687126881268912690126911269212693126941269512696126971269812699127001270112702127031270412705127061270712708127091271012711127121271312714127151271612717127181271912720127211272212723127241272512726127271272812729127301273112732127331273412735127361273712738127391274012741127421274312744127451274612747127481274912750127511275212753127541275512756127571275812759127601276112762127631276412765127661276712768127691277012771127721277312774127751277612777127781277912780127811278212783127841278512786127871278812789127901279112792127931279412795127961279712798127991280012801128021280312804128051280612807128081280912810128111281212813128141281512816128171281812819128201282112822128231282412825128261282712828128291283012831128321283312834128351283612837128381283912840128411284212843128441284512846128471284812849128501285112852128531285412855128561285712858128591286012861128621286312864128651286612867128681286912870128711287212873128741287512876128771287812879128801288112882128831288412885128861288712888128891289012891128921289312894128951289612897128981289912900129011290212903129041290512906129071290812909129101291112912129131291412915129161291712918129191292012921129221292312924129251292612927129281292912930129311293212933129341293512936129371293812939129401294112942129431294412945129461294712948129491295012951129521295312954129551295612957129581295912960129611296212963129641296512966129671296812969129701297112972129731297412975129761297712978129791298012981129821298312984129851298612987129881298912990129911299212993129941299512996129971299812999130001300113002130031300413005130061300713008130091301013011130121301313014130151301613017130181301913020130211
30221302313024130251302613027130281302913030130311303213033130341303513036130371303813039130401304113042130431304413045130461304713048130491305013051130521305313054130551305613057130581305913060130611306213063130641306513066130671306813069130701307113072130731307413075130761307713078130791308013081130821308313084130851308613087130881308913090130911309213093130941309513096130971309813099131001310113102131031310413105131061310713108131091311013111131121311313114131151311613117131181311913120131211312213123131241312513126131271312813129131301313113132131331313413135131361313713138131391314013141131421314313144131451314613147131481314913150131511315213153131541315513156131571315813159131601316113162131631316413165131661316713168131691317013171131721317313174131751317613177131781317913180131811318213183131841318513186131871318813189131901319113192131931319413195131961319713198131991320013201132021320313204132051320613207132081320913210132111321213213132141321513216132171321813219132201322113222132231322413225132261322713228132291323013231132321323313234132351323613237132381323913240132411324213243132441324513246132471324813249132501325113252132531325413255132561325713258132591326013261132621326313264132651326613267132681326913270132711327213273132741327513276132771327813279132801328113282132831328413285132861328713288132891329013291132921329313294132951329613297132981329913300133011330213303133041330513306133071330813309133101331113312133131331413315133161331713318133191332013321133221332313324133251332613327133281332913330133311333213333133341333513336133371333813339133401334113342133431334413345133461334713348133491335013351133521335313354133551335613357133581335913360133611336213363133641336513366133671336813369133701337113372133731337413375133761337713378133791338013381133821338313384133851338613387133881338913390133911339213393133941339513396133971339813399134001340113402134031340413405134061340713408134091341013411134121341313414134151341613417134181341913420134211
34221342313424134251342613427134281342913430134311343213433134341343513436134371343813439134401344113442134431344413445134461344713448134491345013451134521345313454134551345613457134581345913460134611346213463134641346513466134671346813469134701347113472134731347413475134761347713478134791348013481134821348313484134851348613487134881348913490134911349213493134941349513496134971349813499135001350113502135031350413505135061350713508135091351013511135121351313514135151351613517135181351913520135211352213523135241352513526135271352813529135301353113532135331353413535135361353713538135391354013541135421354313544135451354613547135481354913550135511355213553135541355513556135571355813559135601356113562135631356413565135661356713568135691357013571135721357313574135751357613577135781357913580135811358213583135841358513586135871358813589135901359113592135931359413595135961359713598135991360013601136021360313604136051360613607136081360913610136111361213613136141361513616136171361813619136201362113622136231362413625136261362713628136291363013631136321363313634136351363613637136381363913640136411364213643136441364513646136471364813649136501365113652136531365413655136561365713658136591366013661136621366313664136651366613667136681366913670136711367213673136741367513676136771367813679136801368113682136831368413685136861368713688136891369013691136921369313694136951369613697136981369913700137011370213703137041370513706137071370813709137101371113712137131371413715137161371713718137191372013721137221372313724137251372613727137281372913730137311373213733137341373513736137371373813739137401374113742137431374413745137461374713748137491375013751137521375313754137551375613757137581375913760137611376213763137641376513766137671376813769137701377113772137731377413775137761377713778137791378013781137821378313784137851378613787137881378913790137911379213793137941379513796137971379813799138001380113802138031380413805138061380713808138091381013811138121381313814138151381613817138181381913820138211
38221382313824138251382613827138281382913830138311383213833138341383513836138371383813839138401384113842138431384413845138461384713848138491385013851138521385313854138551385613857138581385913860138611386213863138641386513866138671386813869138701387113872138731387413875138761387713878138791388013881138821388313884138851388613887138881388913890138911389213893138941389513896138971389813899139001390113902139031390413905139061390713908139091391013911139121391313914139151391613917139181391913920139211392213923139241392513926139271392813929139301393113932139331393413935139361393713938139391394013941139421394313944139451394613947139481394913950139511395213953139541395513956139571395813959139601396113962139631396413965139661396713968139691397013971139721397313974139751397613977139781397913980139811398213983139841398513986139871398813989139901399113992139931399413995139961399713998139991400014001140021400314004140051400614007140081400914010140111401214013140141401514016140171401814019140201402114022140231402414025140261402714028140291403014031140321403314034140351403614037140381403914040140411404214043140441404514046140471404814049140501405114052140531405414055140561405714058140591406014061140621406314064140651406614067140681406914070140711407214073140741407514076140771407814079140801408114082140831408414085140861408714088140891409014091140921409314094140951409614097140981409914100141011410214103141041410514106141071410814109141101411114112141131411414115141161411714118141191412014121141221412314124141251412614127141281412914130141311413214133141341413514136141371413814139141401414114142141431414414145141461414714148141491415014151141521415314154141551415614157141581415914160141611416214163141641416514166141671416814169141701417114172141731417414175141761417714178141791418014181141821418314184141851418614187141881418914190141911419214193141941419514196141971419814199142001420114202142031420414205142061420714208142091421014211142121421314214142151421614217142181421914220142211
42221422314224142251422614227142281422914230142311423214233142341423514236142371423814239142401424114242142431424414245142461424714248142491425014251142521425314254142551425614257142581425914260142611426214263142641426514266142671426814269142701427114272142731427414275142761427714278142791428014281142821428314284142851428614287142881428914290142911429214293142941429514296142971429814299143001430114302143031430414305143061430714308143091431014311143121431314314143151431614317143181431914320143211432214323143241432514326143271432814329143301433114332143331433414335143361433714338143391434014341143421434314344143451434614347143481434914350143511435214353143541435514356143571435814359143601436114362143631436414365143661436714368143691437014371143721437314374143751437614377143781437914380143811438214383143841438514386143871438814389143901439114392143931439414395143961439714398143991440014401144021440314404144051440614407144081440914410144111441214413144141441514416144171441814419144201442114422144231442414425144261442714428144291443014431144321443314434144351443614437144381443914440144411444214443144441444514446144471444814449144501445114452144531445414455144561445714458144591446014461144621446314464144651446614467144681446914470144711447214473144741447514476144771447814479144801448114482144831448414485144861448714488144891449014491144921449314494144951449614497144981449914500145011450214503145041450514506145071450814509145101451114512145131451414515145161451714518145191452014521145221452314524145251452614527145281452914530145311453214533145341453514536145371453814539145401454114542145431454414545145461454714548145491455014551145521455314554145551455614557145581455914560145611456214563145641456514566145671456814569145701457114572145731457414575145761457714578145791458014581145821458314584145851458614587145881458914590145911459214593145941459514596145971459814599146001460114602146031460414605146061460714608146091461014611146121461314614146151461614617146181461914620146211
46221462314624146251462614627146281462914630146311463214633146341463514636146371463814639146401464114642146431464414645146461464714648146491465014651146521465314654146551465614657146581465914660146611466214663146641466514666146671466814669146701467114672146731467414675146761467714678146791468014681146821468314684146851468614687146881468914690146911469214693146941469514696146971469814699147001470114702147031470414705147061470714708147091471014711147121471314714147151471614717147181471914720147211472214723147241472514726147271472814729147301473114732147331473414735147361473714738147391474014741147421474314744147451474614747147481474914750147511475214753147541475514756147571475814759147601476114762147631476414765147661476714768147691477014771147721477314774147751477614777147781477914780147811478214783147841478514786147871478814789147901479114792147931479414795147961479714798147991480014801148021480314804148051480614807148081480914810148111481214813148141481514816148171481814819148201482114822148231482414825148261482714828148291483014831148321483314834148351483614837148381483914840148411484214843148441484514846148471484814849148501485114852148531485414855148561485714858148591486014861148621486314864148651486614867148681486914870148711487214873148741487514876148771487814879148801488114882148831488414885148861488714888148891489014891148921489314894148951489614897148981489914900149011490214903149041490514906149071490814909149101491114912149131491414915149161491714918149191492014921149221492314924149251492614927149281492914930149311493214933149341493514936149371493814939149401494114942149431494414945149461494714948149491495014951149521495314954149551495614957149581495914960149611496214963149641496514966149671496814969149701497114972149731497414975149761497714978149791498014981149821498314984149851498614987149881498914990149911499214993149941499514996149971499814999150001500115002150031500415005150061500715008150091501015011150121501315014150151501615017150181501915020150211
50221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211
54221542315424154251542615427154281542915430154311543215433154341543515436154371543815439154401544115442154431544415445154461544715448154491545015451154521545315454154551545615457154581545915460154611546215463154641546515466154671546815469154701547115472154731547415475154761547715478154791548015481154821548315484154851548615487154881548915490154911549215493154941549515496154971549815499155001550115502155031550415505155061550715508155091551015511155121551315514155151551615517155181551915520155211552215523155241552515526155271552815529155301553115532155331553415535155361553715538155391554015541155421554315544155451554615547155481554915550155511555215553155541555515556155571555815559155601556115562155631556415565155661556715568155691557015571155721557315574155751557615577155781557915580155811558215583155841558515586155871558815589155901559115592155931559415595155961559715598155991560015601156021560315604156051560615607156081560915610156111561215613156141561515616156171561815619156201562115622156231562415625156261562715628156291563015631156321563315634156351563615637156381563915640156411564215643156441564515646156471564815649156501565115652156531565415655156561565715658156591566015661156621566315664156651566615667156681566915670156711567215673156741567515676156771567815679156801568115682156831568415685156861568715688156891569015691156921569315694156951569615697156981569915700157011570215703157041570515706157071570815709157101571115712157131571415715157161571715718157191572015721157221572315724157251572615727157281572915730157311573215733157341573515736157371573815739157401574115742157431574415745157461574715748157491575015751157521575315754157551575615757157581575915760157611576215763157641576515766157671576815769157701577115772157731577415775157761577715778157791578015781157821578315784157851578615787157881578915790157911579215793157941579515796157971579815799158001580115802158031580415805158061580715808158091581015811158121581315814158151581615817158181581915820158211
58221582315824158251582615827158281582915830158311583215833158341583515836158371583815839158401584115842158431584415845158461584715848158491585015851158521585315854158551585615857158581585915860158611586215863158641586515866158671586815869158701587115872158731587415875158761587715878158791588015881158821588315884158851588615887158881588915890158911589215893158941589515896158971589815899159001590115902159031590415905159061590715908159091591015911159121591315914159151591615917159181591915920159211592215923159241592515926159271592815929159301593115932159331593415935159361593715938159391594015941159421594315944159451594615947159481594915950159511595215953159541595515956159571595815959159601596115962159631596415965159661596715968159691597015971159721597315974159751597615977159781597915980159811598215983159841598515986159871598815989159901599115992159931599415995159961599715998159991600016001160021600316004160051600616007160081600916010160111601216013160141601516016160171601816019160201602116022160231602416025160261602716028160291603016031160321603316034160351603616037160381603916040160411604216043160441604516046160471604816049160501605116052160531605416055160561605716058160591606016061160621606316064160651606616067160681606916070160711607216073160741607516076160771607816079160801608116082160831608416085160861608716088160891609016091160921609316094160951609616097160981609916100161011610216103161041610516106161071610816109161101611116112161131611416115161161611716118161191612016121161221612316124161251612616127161281612916130161311613216133161341613516136161371613816139161401614116142161431614416145161461614716148161491615016151161521615316154161551615616157161581615916160161611616216163161641616516166161671616816169161701617116172161731617416175161761617716178161791618016181161821618316184161851618616187161881618916190161911619216193161941619516196161971619816199162001620116202162031620416205162061620716208162091621016211162121621316214162151621616217162181621916220162211
62221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211
66221662316624166251662616627166281662916630166311663216633166341663516636166371663816639166401664116642166431664416645166461664716648166491665016651166521665316654166551665616657166581665916660166611666216663166641666516666166671666816669166701667116672166731667416675166761667716678166791668016681166821668316684166851668616687166881668916690166911669216693166941669516696166971669816699167001670116702167031670416705167061670716708167091671016711167121671316714167151671616717167181671916720167211672216723167241672516726167271672816729167301673116732167331673416735167361673716738167391674016741167421674316744167451674616747167481674916750167511675216753167541675516756167571675816759167601676116762167631676416765167661676716768167691677016771167721677316774167751677616777167781677916780167811678216783167841678516786167871678816789167901679116792167931679416795167961679716798167991680016801168021680316804168051680616807168081680916810168111681216813168141681516816168171681816819168201682116822168231682416825168261682716828168291683016831168321683316834168351683616837168381683916840168411684216843168441684516846168471684816849168501685116852168531685416855168561685716858168591686016861168621686316864168651686616867168681686916870168711687216873168741687516876168771687816879168801688116882168831688416885168861688716888168891689016891168921689316894168951689616897168981689916900169011690216903169041690516906169071690816909169101691116912169131691416915169161691716918169191692016921169221692316924169251692616927169281692916930169311693216933169341693516936169371693816939169401694116942169431694416945169461694716948169491695016951169521695316954169551695616957169581695916960169611696216963169641696516966169671696816969169701697116972169731697416975169761697716978169791698016981169821698316984169851698616987169881698916990169911699216993169941699516996169971699816999170001700117002170031700417005170061700717008170091701017011170121701317014170151701617017170181701917020170211
70221702317024170251702617027170281702917030170311703217033170341703517036170371703817039170401704117042170431704417045170461704717048170491705017051170521705317054170551705617057170581705917060170611706217063170641706517066170671706817069170701707117072170731707417075170761707717078170791708017081170821708317084170851708617087170881708917090170911709217093170941709517096170971709817099171001710117102171031710417105171061710717108171091711017111171121711317114171151711617117171181711917120171211712217123171241712517126171271712817129171301713117132171331713417135171361713717138171391714017141171421714317144171451714617147171481714917150171511715217153171541715517156171571715817159171601716117162171631716417165171661716717168171691717017171171721717317174171751717617177171781717917180171811718217183171841718517186171871718817189171901719117192171931719417195171961719717198171991720017201172021720317204172051720617207172081720917210172111721217213172141721517216172171721817219172201722117222172231722417225172261722717228172291723017231172321723317234172351723617237172381723917240172411724217243172441724517246172471724817249172501725117252172531725417255172561725717258172591726017261172621726317264172651726617267172681726917270172711727217273172741727517276172771727817279172801728117282172831728417285172861728717288172891729017291172921729317294172951729617297172981729917300173011730217303173041730517306173071730817309173101731117312173131731417315173161731717318173191732017321173221732317324173251732617327173281732917330173311733217333173341733517336173371733817339173401734117342173431734417345173461734717348173491735017351173521735317354173551735617357173581735917360173611736217363173641736517366173671736817369173701737117372173731737417375173761737717378173791738017381173821738317384173851738617387173881738917390173911739217393173941739517396173971739817399174001740117402174031740417405174061740717408174091741017411174121741317414174151741617417174181741917420174211
74221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211
78221782317824178251782617827178281782917830178311783217833178341783517836178371783817839178401784117842178431784417845178461784717848178491785017851178521785317854178551785617857178581785917860178611786217863178641786517866178671786817869178701787117872178731787417875178761787717878178791788017881178821788317884178851788617887178881788917890178911789217893178941789517896178971789817899179001790117902179031790417905179061790717908179091791017911179121791317914179151791617917179181791917920179211792217923179241792517926179271792817929179301793117932179331793417935179361793717938179391794017941179421794317944179451794617947179481794917950179511795217953179541795517956179571795817959179601796117962179631796417965179661796717968179691797017971179721797317974179751797617977179781797917980179811798217983179841798517986179871798817989179901799117992179931799417995179961799717998179991800018001180021800318004180051800618007180081800918010180111801218013180141801518016180171801818019180201802118022180231802418025180261802718028180291803018031180321803318034180351803618037180381803918040180411804218043180441804518046180471804818049180501805118052180531805418055180561805718058180591806018061180621806318064180651806618067180681806918070180711807218073180741807518076180771807818079180801808118082180831808418085180861808718088180891809018091180921809318094180951809618097180981809918100181011810218103181041810518106181071810818109181101811118112181131811418115181161811718118181191812018121181221812318124181251812618127181281812918130181311813218133181341813518136181371813818139181401814118142181431814418145181461814718148181491815018151181521815318154181551815618157181581815918160181611816218163181641816518166181671816818169181701817118172181731817418175181761817718178181791818018181181821818318184181851818618187181881818918190181911819218193181941819518196181971819818199182001820118202182031820418205182061820718208182091821018211182121821318214182151821618217182181821918220182211
82221822318224182251822618227182281822918230182311823218233182341823518236182371823818239182401824118242182431824418245182461824718248182491825018251182521825318254182551825618257182581825918260182611826218263182641826518266182671826818269182701827118272182731827418275182761827718278182791828018281182821828318284182851828618287182881828918290182911829218293182941829518296182971829818299183001830118302183031830418305183061830718308183091831018311183121831318314183151831618317183181831918320183211832218323183241832518326183271832818329183301833118332183331833418335183361833718338183391834018341183421834318344183451834618347183481834918350183511835218353183541835518356183571835818359183601836118362183631836418365183661836718368183691837018371183721837318374183751837618377183781837918380183811838218383183841838518386183871838818389183901839118392183931839418395183961839718398183991840018401184021840318404184051840618407184081840918410184111841218413184141841518416184171841818419184201842118422184231842418425184261842718428184291843018431184321843318434184351843618437184381843918440184411844218443184441844518446184471844818449184501845118452184531845418455184561845718458184591846018461184621846318464184651846618467184681846918470184711847218473184741847518476184771847818479184801848118482184831848418485184861848718488184891849018491184921849318494184951849618497184981849918500185011850218503185041850518506185071850818509185101851118512185131851418515185161851718518185191852018521185221852318524185251852618527185281852918530185311853218533185341853518536185371853818539185401854118542185431854418545185461854718548185491855018551185521855318554185551855618557185581855918560185611856218563185641856518566185671856818569185701857118572185731857418575185761857718578185791858018581185821858318584185851858618587185881858918590185911859218593185941859518596185971859818599186001860118602186031860418605186061860718608186091861018611186121861318614186151861618617186181861918620186211
86221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211
90221902319024190251902619027190281902919030190311903219033190341903519036190371903819039190401904119042190431904419045190461904719048190491905019051190521905319054190551905619057190581905919060190611906219063190641906519066190671906819069190701907119072190731907419075190761907719078190791908019081190821908319084190851908619087190881908919090190911909219093190941909519096190971909819099191001910119102191031910419105191061910719108191091911019111191121911319114191151911619117191181911919120191211912219123191241912519126191271912819129191301913119132191331913419135191361913719138191391914019141191421914319144191451914619147191481914919150191511915219153191541915519156191571915819159191601916119162191631916419165191661916719168191691917019171191721917319174191751917619177191781917919180191811918219183191841918519186191871918819189191901919119192191931919419195191961919719198191991920019201192021920319204192051920619207192081920919210192111921219213192141921519216192171921819219192201922119222192231922419225192261922719228192291923019231192321923319234192351923619237192381923919240192411924219243192441924519246192471924819249192501925119252192531925419255192561925719258192591926019261192621926319264192651926619267192681926919270192711927219273192741927519276192771927819279192801928119282192831928419285192861928719288192891929019291192921929319294192951929619297192981929919300193011930219303193041930519306193071930819309193101931119312193131931419315193161931719318193191932019321193221932319324193251932619327193281932919330193311933219333193341933519336193371933819339193401934119342193431934419345193461934719348193491935019351193521935319354193551935619357193581935919360193611936219363193641936519366193671936819369193701937119372193731937419375193761937719378193791938019381193821938319384193851938619387193881938919390193911939219393193941939519396193971939819399194001940119402194031940419405194061940719408194091941019411194121941319414194151941619417194181941919420194211
94221942319424194251942619427194281942919430194311943219433194341943519436194371943819439194401944119442194431944419445194461944719448194491945019451194521945319454194551945619457194581945919460194611946219463194641946519466194671946819469194701947119472194731947419475194761947719478194791948019481194821948319484194851948619487194881948919490194911949219493194941949519496194971949819499195001950119502195031950419505195061950719508195091951019511195121951319514195151951619517195181951919520195211952219523195241952519526195271952819529195301953119532195331953419535195361953719538195391954019541195421954319544195451954619547195481954919550195511955219553195541955519556195571955819559195601956119562195631956419565195661956719568195691957019571195721957319574195751957619577195781957919580195811958219583195841958519586195871958819589195901959119592195931959419595195961959719598195991960019601196021960319604196051960619607196081960919610196111961219613196141961519616196171961819619196201962119622196231962419625196261962719628196291963019631196321963319634196351963619637196381963919640196411964219643196441964519646196471964819649196501965119652196531965419655196561965719658196591966019661196621966319664196651966619667196681966919670196711967219673196741967519676196771967819679196801968119682196831968419685196861968719688196891969019691196921969319694196951969619697196981969919700197011970219703197041970519706197071970819709197101971119712197131971419715197161971719718197191972019721197221972319724197251972619727197281972919730197311973219733197341973519736197371973819739197401974119742197431974419745197461974719748197491975019751197521975319754197551975619757197581975919760197611976219763197641976519766197671976819769197701977119772197731977419775197761977719778197791978019781197821978319784197851978619787197881978919790197911979219793197941979519796197971979819799198001980119802198031980419805198061980719808198091981019811198121981319814198151981619817198181981919820198211
98221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212
02222022320224202252022620227202282022920230202312023220233202342023520236202372023820239202402024120242202432024420245202462024720248202492025020251202522025320254202552025620257202582025920260202612026220263202642026520266202672026820269202702027120272202732027420275202762027720278202792028020281202822028320284202852028620287202882028920290202912029220293202942029520296202972029820299203002030120302203032030420305203062030720308203092031020311203122031320314203152031620317203182031920320203212032220323203242032520326203272032820329203302033120332203332033420335203362033720338203392034020341203422034320344203452034620347203482034920350203512035220353203542035520356203572035820359203602036120362203632036420365203662036720368203692037020371203722037320374203752037620377203782037920380203812038220383203842038520386203872038820389203902039120392203932039420395203962039720398203992040020401204022040320404204052040620407204082040920410204112041220413204142041520416204172041820419204202042120422204232042420425204262042720428204292043020431204322043320434204352043620437204382043920440204412044220443204442044520446204472044820449204502045120452204532045420455204562045720458204592046020461204622046320464204652046620467204682046920470204712047220473204742047520476204772047820479204802048120482204832048420485204862048720488204892049020491204922049320494204952049620497204982049920500205012050220503205042050520506205072050820509205102051120512205132051420515205162051720518205192052020521205222052320524205252052620527205282052920530205312053220533205342053520536205372053820539205402054120542205432054420545205462054720548205492055020551205522055320554205552055620557205582055920560205612056220563205642056520566205672056820569205702057120572205732057420575205762057720578205792058020581205822058320584205852058620587205882058920590205912059220593205942059520596205972059820599206002060120602206032060420605206062060720608206092061020611206122061320614206152061620617206182061920620206212
0622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793
  1. /* 32-bit ELF support for ARM
  2. Copyright (C) 1998-2022 Free Software Foundation, Inc.
  3. This file is part of BFD, the Binary File Descriptor library.
  4. This program is free software; you can redistribute it and/or modify
  5. it under the terms of the GNU General Public License as published by
  6. the Free Software Foundation; either version 3 of the License, or
  7. (at your option) any later version.
  8. This program is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. GNU General Public License for more details.
  12. You should have received a copy of the GNU General Public License
  13. along with this program; if not, write to the Free Software
  14. Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
  15. MA 02110-1301, USA. */
  16. #include "sysdep.h"
  17. #include <limits.h>
  18. #include "bfd.h"
  19. #include "libiberty.h"
  20. #include "libbfd.h"
  21. #include "elf-bfd.h"
  22. #include "elf-nacl.h"
  23. #include "elf-vxworks.h"
  24. #include "elf/arm.h"
  25. #include "elf32-arm.h"
  26. #include "cpu-arm.h"
/* Return the relocation section associated with NAME.  HTAB is the
   bfd's link hash table; its use_rel flag selects REL (".rel")
   versus RELA (".rela") relocation sections.
   NOTE(review): the original comment said "elf32_arm_link_hash_entry",
   but use_rel is read from the hash table -- confirm against the
   struct elf32_arm_link_hash_table definition.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)

/* Return the size of one relocation entry: Elf32_External_Rel when
   HTAB->use_rel is set, Elf32_External_Rela otherwise.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))

/* Return the function used to swap a relocation in, matching the
   REL/RELA choice made by HTAB->use_rel.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)

/* Return the function used to swap a relocation out, matching the
   REL/RELA choice made by HTAB->use_rel.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)

/* REL relocations are decoded through elf32_arm_info_to_howto; the
   generic RELA info-to-howto hook is unused for this target.  */
#define elf_info_to_howto NULL
#define elf_info_to_howto_rel elf32_arm_info_to_howto

/* ABI version numbers written into the ELF header for this target.  */
#define ARM_ELF_ABI_VERSION 0
#define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF: the place X with its low
   two bits masked off, i.e. rounded down to 32-bit alignment.  */
#define Pa(X) ((X) & 0xfffffffc)
/* Forward declaration of the section-writing routine; its definition
   appears later in this file.  Writes SEC's CONTENTS to OUTPUT_BFD
   for the link described by LINK_INFO, returning false on error.  */
static bool elf32_arm_write_section (bfd *output_bfd,
				     struct bfd_link_info *link_info,
				     asection *sec,
				     bfd_byte *contents);
  59. /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
  60. R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
  61. in that slot. */
  62. static reloc_howto_type elf32_arm_howto_table_1[] =
  63. {
  64. /* No relocation. */
  65. HOWTO (R_ARM_NONE, /* type */
  66. 0, /* rightshift */
  67. 3, /* size (0 = byte, 1 = short, 2 = long) */
  68. 0, /* bitsize */
  69. false, /* pc_relative */
  70. 0, /* bitpos */
  71. complain_overflow_dont,/* complain_on_overflow */
  72. bfd_elf_generic_reloc, /* special_function */
  73. "R_ARM_NONE", /* name */
  74. false, /* partial_inplace */
  75. 0, /* src_mask */
  76. 0, /* dst_mask */
  77. false), /* pcrel_offset */
  78. HOWTO (R_ARM_PC24, /* type */
  79. 2, /* rightshift */
  80. 2, /* size (0 = byte, 1 = short, 2 = long) */
  81. 24, /* bitsize */
  82. true, /* pc_relative */
  83. 0, /* bitpos */
  84. complain_overflow_signed,/* complain_on_overflow */
  85. bfd_elf_generic_reloc, /* special_function */
  86. "R_ARM_PC24", /* name */
  87. false, /* partial_inplace */
  88. 0x00ffffff, /* src_mask */
  89. 0x00ffffff, /* dst_mask */
  90. true), /* pcrel_offset */
  91. /* 32 bit absolute */
  92. HOWTO (R_ARM_ABS32, /* type */
  93. 0, /* rightshift */
  94. 2, /* size (0 = byte, 1 = short, 2 = long) */
  95. 32, /* bitsize */
  96. false, /* pc_relative */
  97. 0, /* bitpos */
  98. complain_overflow_bitfield,/* complain_on_overflow */
  99. bfd_elf_generic_reloc, /* special_function */
  100. "R_ARM_ABS32", /* name */
  101. false, /* partial_inplace */
  102. 0xffffffff, /* src_mask */
  103. 0xffffffff, /* dst_mask */
  104. false), /* pcrel_offset */
  105. /* standard 32bit pc-relative reloc */
  106. HOWTO (R_ARM_REL32, /* type */
  107. 0, /* rightshift */
  108. 2, /* size (0 = byte, 1 = short, 2 = long) */
  109. 32, /* bitsize */
  110. true, /* pc_relative */
  111. 0, /* bitpos */
  112. complain_overflow_bitfield,/* complain_on_overflow */
  113. bfd_elf_generic_reloc, /* special_function */
  114. "R_ARM_REL32", /* name */
  115. false, /* partial_inplace */
  116. 0xffffffff, /* src_mask */
  117. 0xffffffff, /* dst_mask */
  118. true), /* pcrel_offset */
  119. /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
  120. HOWTO (R_ARM_LDR_PC_G0, /* type */
  121. 0, /* rightshift */
  122. 0, /* size (0 = byte, 1 = short, 2 = long) */
  123. 32, /* bitsize */
  124. true, /* pc_relative */
  125. 0, /* bitpos */
  126. complain_overflow_dont,/* complain_on_overflow */
  127. bfd_elf_generic_reloc, /* special_function */
  128. "R_ARM_LDR_PC_G0", /* name */
  129. false, /* partial_inplace */
  130. 0xffffffff, /* src_mask */
  131. 0xffffffff, /* dst_mask */
  132. true), /* pcrel_offset */
  133. /* 16 bit absolute */
  134. HOWTO (R_ARM_ABS16, /* type */
  135. 0, /* rightshift */
  136. 1, /* size (0 = byte, 1 = short, 2 = long) */
  137. 16, /* bitsize */
  138. false, /* pc_relative */
  139. 0, /* bitpos */
  140. complain_overflow_bitfield,/* complain_on_overflow */
  141. bfd_elf_generic_reloc, /* special_function */
  142. "R_ARM_ABS16", /* name */
  143. false, /* partial_inplace */
  144. 0x0000ffff, /* src_mask */
  145. 0x0000ffff, /* dst_mask */
  146. false), /* pcrel_offset */
  147. /* 12 bit absolute */
  148. HOWTO (R_ARM_ABS12, /* type */
  149. 0, /* rightshift */
  150. 2, /* size (0 = byte, 1 = short, 2 = long) */
  151. 12, /* bitsize */
  152. false, /* pc_relative */
  153. 0, /* bitpos */
  154. complain_overflow_bitfield,/* complain_on_overflow */
  155. bfd_elf_generic_reloc, /* special_function */
  156. "R_ARM_ABS12", /* name */
  157. false, /* partial_inplace */
  158. 0x00000fff, /* src_mask */
  159. 0x00000fff, /* dst_mask */
  160. false), /* pcrel_offset */
  161. HOWTO (R_ARM_THM_ABS5, /* type */
  162. 6, /* rightshift */
  163. 1, /* size (0 = byte, 1 = short, 2 = long) */
  164. 5, /* bitsize */
  165. false, /* pc_relative */
  166. 0, /* bitpos */
  167. complain_overflow_bitfield,/* complain_on_overflow */
  168. bfd_elf_generic_reloc, /* special_function */
  169. "R_ARM_THM_ABS5", /* name */
  170. false, /* partial_inplace */
  171. 0x000007e0, /* src_mask */
  172. 0x000007e0, /* dst_mask */
  173. false), /* pcrel_offset */
  174. /* 8 bit absolute */
  175. HOWTO (R_ARM_ABS8, /* type */
  176. 0, /* rightshift */
  177. 0, /* size (0 = byte, 1 = short, 2 = long) */
  178. 8, /* bitsize */
  179. false, /* pc_relative */
  180. 0, /* bitpos */
  181. complain_overflow_bitfield,/* complain_on_overflow */
  182. bfd_elf_generic_reloc, /* special_function */
  183. "R_ARM_ABS8", /* name */
  184. false, /* partial_inplace */
  185. 0x000000ff, /* src_mask */
  186. 0x000000ff, /* dst_mask */
  187. false), /* pcrel_offset */
  188. HOWTO (R_ARM_SBREL32, /* type */
  189. 0, /* rightshift */
  190. 2, /* size (0 = byte, 1 = short, 2 = long) */
  191. 32, /* bitsize */
  192. false, /* pc_relative */
  193. 0, /* bitpos */
  194. complain_overflow_dont,/* complain_on_overflow */
  195. bfd_elf_generic_reloc, /* special_function */
  196. "R_ARM_SBREL32", /* name */
  197. false, /* partial_inplace */
  198. 0xffffffff, /* src_mask */
  199. 0xffffffff, /* dst_mask */
  200. false), /* pcrel_offset */
  201. HOWTO (R_ARM_THM_CALL, /* type */
  202. 1, /* rightshift */
  203. 2, /* size (0 = byte, 1 = short, 2 = long) */
  204. 24, /* bitsize */
  205. true, /* pc_relative */
  206. 0, /* bitpos */
  207. complain_overflow_signed,/* complain_on_overflow */
  208. bfd_elf_generic_reloc, /* special_function */
  209. "R_ARM_THM_CALL", /* name */
  210. false, /* partial_inplace */
  211. 0x07ff2fff, /* src_mask */
  212. 0x07ff2fff, /* dst_mask */
  213. true), /* pcrel_offset */
  214. HOWTO (R_ARM_THM_PC8, /* type */
  215. 1, /* rightshift */
  216. 1, /* size (0 = byte, 1 = short, 2 = long) */
  217. 8, /* bitsize */
  218. true, /* pc_relative */
  219. 0, /* bitpos */
  220. complain_overflow_signed,/* complain_on_overflow */
  221. bfd_elf_generic_reloc, /* special_function */
  222. "R_ARM_THM_PC8", /* name */
  223. false, /* partial_inplace */
  224. 0x000000ff, /* src_mask */
  225. 0x000000ff, /* dst_mask */
  226. true), /* pcrel_offset */
  227. HOWTO (R_ARM_BREL_ADJ, /* type */
  228. 1, /* rightshift */
  229. 1, /* size (0 = byte, 1 = short, 2 = long) */
  230. 32, /* bitsize */
  231. false, /* pc_relative */
  232. 0, /* bitpos */
  233. complain_overflow_signed,/* complain_on_overflow */
  234. bfd_elf_generic_reloc, /* special_function */
  235. "R_ARM_BREL_ADJ", /* name */
  236. false, /* partial_inplace */
  237. 0xffffffff, /* src_mask */
  238. 0xffffffff, /* dst_mask */
  239. false), /* pcrel_offset */
  240. HOWTO (R_ARM_TLS_DESC, /* type */
  241. 0, /* rightshift */
  242. 2, /* size (0 = byte, 1 = short, 2 = long) */
  243. 32, /* bitsize */
  244. false, /* pc_relative */
  245. 0, /* bitpos */
  246. complain_overflow_bitfield,/* complain_on_overflow */
  247. bfd_elf_generic_reloc, /* special_function */
  248. "R_ARM_TLS_DESC", /* name */
  249. false, /* partial_inplace */
  250. 0xffffffff, /* src_mask */
  251. 0xffffffff, /* dst_mask */
  252. false), /* pcrel_offset */
  253. HOWTO (R_ARM_THM_SWI8, /* type */
  254. 0, /* rightshift */
  255. 0, /* size (0 = byte, 1 = short, 2 = long) */
  256. 0, /* bitsize */
  257. false, /* pc_relative */
  258. 0, /* bitpos */
  259. complain_overflow_signed,/* complain_on_overflow */
  260. bfd_elf_generic_reloc, /* special_function */
  261. "R_ARM_SWI8", /* name */
  262. false, /* partial_inplace */
  263. 0x00000000, /* src_mask */
  264. 0x00000000, /* dst_mask */
  265. false), /* pcrel_offset */
  266. /* BLX instruction for the ARM. */
  267. HOWTO (R_ARM_XPC25, /* type */
  268. 2, /* rightshift */
  269. 2, /* size (0 = byte, 1 = short, 2 = long) */
  270. 24, /* bitsize */
  271. true, /* pc_relative */
  272. 0, /* bitpos */
  273. complain_overflow_signed,/* complain_on_overflow */
  274. bfd_elf_generic_reloc, /* special_function */
  275. "R_ARM_XPC25", /* name */
  276. false, /* partial_inplace */
  277. 0x00ffffff, /* src_mask */
  278. 0x00ffffff, /* dst_mask */
  279. true), /* pcrel_offset */
  280. /* BLX instruction for the Thumb. */
  281. HOWTO (R_ARM_THM_XPC22, /* type */
  282. 2, /* rightshift */
  283. 2, /* size (0 = byte, 1 = short, 2 = long) */
  284. 24, /* bitsize */
  285. true, /* pc_relative */
  286. 0, /* bitpos */
  287. complain_overflow_signed,/* complain_on_overflow */
  288. bfd_elf_generic_reloc, /* special_function */
  289. "R_ARM_THM_XPC22", /* name */
  290. false, /* partial_inplace */
  291. 0x07ff2fff, /* src_mask */
  292. 0x07ff2fff, /* dst_mask */
  293. true), /* pcrel_offset */
  294. /* Dynamic TLS relocations. */
  295. HOWTO (R_ARM_TLS_DTPMOD32, /* type */
  296. 0, /* rightshift */
  297. 2, /* size (0 = byte, 1 = short, 2 = long) */
  298. 32, /* bitsize */
  299. false, /* pc_relative */
  300. 0, /* bitpos */
  301. complain_overflow_bitfield,/* complain_on_overflow */
  302. bfd_elf_generic_reloc, /* special_function */
  303. "R_ARM_TLS_DTPMOD32", /* name */
  304. true, /* partial_inplace */
  305. 0xffffffff, /* src_mask */
  306. 0xffffffff, /* dst_mask */
  307. false), /* pcrel_offset */
  308. HOWTO (R_ARM_TLS_DTPOFF32, /* type */
  309. 0, /* rightshift */
  310. 2, /* size (0 = byte, 1 = short, 2 = long) */
  311. 32, /* bitsize */
  312. false, /* pc_relative */
  313. 0, /* bitpos */
  314. complain_overflow_bitfield,/* complain_on_overflow */
  315. bfd_elf_generic_reloc, /* special_function */
  316. "R_ARM_TLS_DTPOFF32", /* name */
  317. true, /* partial_inplace */
  318. 0xffffffff, /* src_mask */
  319. 0xffffffff, /* dst_mask */
  320. false), /* pcrel_offset */
  321. HOWTO (R_ARM_TLS_TPOFF32, /* type */
  322. 0, /* rightshift */
  323. 2, /* size (0 = byte, 1 = short, 2 = long) */
  324. 32, /* bitsize */
  325. false, /* pc_relative */
  326. 0, /* bitpos */
  327. complain_overflow_bitfield,/* complain_on_overflow */
  328. bfd_elf_generic_reloc, /* special_function */
  329. "R_ARM_TLS_TPOFF32", /* name */
  330. true, /* partial_inplace */
  331. 0xffffffff, /* src_mask */
  332. 0xffffffff, /* dst_mask */
  333. false), /* pcrel_offset */
  334. /* Relocs used in ARM Linux */
  335. HOWTO (R_ARM_COPY, /* type */
  336. 0, /* rightshift */
  337. 2, /* size (0 = byte, 1 = short, 2 = long) */
  338. 32, /* bitsize */
  339. false, /* pc_relative */
  340. 0, /* bitpos */
  341. complain_overflow_bitfield,/* complain_on_overflow */
  342. bfd_elf_generic_reloc, /* special_function */
  343. "R_ARM_COPY", /* name */
  344. true, /* partial_inplace */
  345. 0xffffffff, /* src_mask */
  346. 0xffffffff, /* dst_mask */
  347. false), /* pcrel_offset */
  348. HOWTO (R_ARM_GLOB_DAT, /* type */
  349. 0, /* rightshift */
  350. 2, /* size (0 = byte, 1 = short, 2 = long) */
  351. 32, /* bitsize */
  352. false, /* pc_relative */
  353. 0, /* bitpos */
  354. complain_overflow_bitfield,/* complain_on_overflow */
  355. bfd_elf_generic_reloc, /* special_function */
  356. "R_ARM_GLOB_DAT", /* name */
  357. true, /* partial_inplace */
  358. 0xffffffff, /* src_mask */
  359. 0xffffffff, /* dst_mask */
  360. false), /* pcrel_offset */
  361. HOWTO (R_ARM_JUMP_SLOT, /* type */
  362. 0, /* rightshift */
  363. 2, /* size (0 = byte, 1 = short, 2 = long) */
  364. 32, /* bitsize */
  365. false, /* pc_relative */
  366. 0, /* bitpos */
  367. complain_overflow_bitfield,/* complain_on_overflow */
  368. bfd_elf_generic_reloc, /* special_function */
  369. "R_ARM_JUMP_SLOT", /* name */
  370. true, /* partial_inplace */
  371. 0xffffffff, /* src_mask */
  372. 0xffffffff, /* dst_mask */
  373. false), /* pcrel_offset */
  374. HOWTO (R_ARM_RELATIVE, /* type */
  375. 0, /* rightshift */
  376. 2, /* size (0 = byte, 1 = short, 2 = long) */
  377. 32, /* bitsize */
  378. false, /* pc_relative */
  379. 0, /* bitpos */
  380. complain_overflow_bitfield,/* complain_on_overflow */
  381. bfd_elf_generic_reloc, /* special_function */
  382. "R_ARM_RELATIVE", /* name */
  383. true, /* partial_inplace */
  384. 0xffffffff, /* src_mask */
  385. 0xffffffff, /* dst_mask */
  386. false), /* pcrel_offset */
  387. HOWTO (R_ARM_GOTOFF32, /* type */
  388. 0, /* rightshift */
  389. 2, /* size (0 = byte, 1 = short, 2 = long) */
  390. 32, /* bitsize */
  391. false, /* pc_relative */
  392. 0, /* bitpos */
  393. complain_overflow_bitfield,/* complain_on_overflow */
  394. bfd_elf_generic_reloc, /* special_function */
  395. "R_ARM_GOTOFF32", /* name */
  396. true, /* partial_inplace */
  397. 0xffffffff, /* src_mask */
  398. 0xffffffff, /* dst_mask */
  399. false), /* pcrel_offset */
  400. HOWTO (R_ARM_GOTPC, /* type */
  401. 0, /* rightshift */
  402. 2, /* size (0 = byte, 1 = short, 2 = long) */
  403. 32, /* bitsize */
  404. true, /* pc_relative */
  405. 0, /* bitpos */
  406. complain_overflow_bitfield,/* complain_on_overflow */
  407. bfd_elf_generic_reloc, /* special_function */
  408. "R_ARM_GOTPC", /* name */
  409. true, /* partial_inplace */
  410. 0xffffffff, /* src_mask */
  411. 0xffffffff, /* dst_mask */
  412. true), /* pcrel_offset */
  413. HOWTO (R_ARM_GOT32, /* type */
  414. 0, /* rightshift */
  415. 2, /* size (0 = byte, 1 = short, 2 = long) */
  416. 32, /* bitsize */
  417. false, /* pc_relative */
  418. 0, /* bitpos */
  419. complain_overflow_bitfield,/* complain_on_overflow */
  420. bfd_elf_generic_reloc, /* special_function */
  421. "R_ARM_GOT32", /* name */
  422. true, /* partial_inplace */
  423. 0xffffffff, /* src_mask */
  424. 0xffffffff, /* dst_mask */
  425. false), /* pcrel_offset */
  426. HOWTO (R_ARM_PLT32, /* type */
  427. 2, /* rightshift */
  428. 2, /* size (0 = byte, 1 = short, 2 = long) */
  429. 24, /* bitsize */
  430. true, /* pc_relative */
  431. 0, /* bitpos */
  432. complain_overflow_bitfield,/* complain_on_overflow */
  433. bfd_elf_generic_reloc, /* special_function */
  434. "R_ARM_PLT32", /* name */
  435. false, /* partial_inplace */
  436. 0x00ffffff, /* src_mask */
  437. 0x00ffffff, /* dst_mask */
  438. true), /* pcrel_offset */
  439. HOWTO (R_ARM_CALL, /* type */
  440. 2, /* rightshift */
  441. 2, /* size (0 = byte, 1 = short, 2 = long) */
  442. 24, /* bitsize */
  443. true, /* pc_relative */
  444. 0, /* bitpos */
  445. complain_overflow_signed,/* complain_on_overflow */
  446. bfd_elf_generic_reloc, /* special_function */
  447. "R_ARM_CALL", /* name */
  448. false, /* partial_inplace */
  449. 0x00ffffff, /* src_mask */
  450. 0x00ffffff, /* dst_mask */
  451. true), /* pcrel_offset */
  452. HOWTO (R_ARM_JUMP24, /* type */
  453. 2, /* rightshift */
  454. 2, /* size (0 = byte, 1 = short, 2 = long) */
  455. 24, /* bitsize */
  456. true, /* pc_relative */
  457. 0, /* bitpos */
  458. complain_overflow_signed,/* complain_on_overflow */
  459. bfd_elf_generic_reloc, /* special_function */
  460. "R_ARM_JUMP24", /* name */
  461. false, /* partial_inplace */
  462. 0x00ffffff, /* src_mask */
  463. 0x00ffffff, /* dst_mask */
  464. true), /* pcrel_offset */
  465. HOWTO (R_ARM_THM_JUMP24, /* type */
  466. 1, /* rightshift */
  467. 2, /* size (0 = byte, 1 = short, 2 = long) */
  468. 24, /* bitsize */
  469. true, /* pc_relative */
  470. 0, /* bitpos */
  471. complain_overflow_signed,/* complain_on_overflow */
  472. bfd_elf_generic_reloc, /* special_function */
  473. "R_ARM_THM_JUMP24", /* name */
  474. false, /* partial_inplace */
  475. 0x07ff2fff, /* src_mask */
  476. 0x07ff2fff, /* dst_mask */
  477. true), /* pcrel_offset */
  478. HOWTO (R_ARM_BASE_ABS, /* type */
  479. 0, /* rightshift */
  480. 2, /* size (0 = byte, 1 = short, 2 = long) */
  481. 32, /* bitsize */
  482. false, /* pc_relative */
  483. 0, /* bitpos */
  484. complain_overflow_dont,/* complain_on_overflow */
  485. bfd_elf_generic_reloc, /* special_function */
  486. "R_ARM_BASE_ABS", /* name */
  487. false, /* partial_inplace */
  488. 0xffffffff, /* src_mask */
  489. 0xffffffff, /* dst_mask */
  490. false), /* pcrel_offset */
  491. HOWTO (R_ARM_ALU_PCREL7_0, /* type */
  492. 0, /* rightshift */
  493. 2, /* size (0 = byte, 1 = short, 2 = long) */
  494. 12, /* bitsize */
  495. true, /* pc_relative */
  496. 0, /* bitpos */
  497. complain_overflow_dont,/* complain_on_overflow */
  498. bfd_elf_generic_reloc, /* special_function */
  499. "R_ARM_ALU_PCREL_7_0", /* name */
  500. false, /* partial_inplace */
  501. 0x00000fff, /* src_mask */
  502. 0x00000fff, /* dst_mask */
  503. true), /* pcrel_offset */
  504. HOWTO (R_ARM_ALU_PCREL15_8, /* type */
  505. 0, /* rightshift */
  506. 2, /* size (0 = byte, 1 = short, 2 = long) */
  507. 12, /* bitsize */
  508. true, /* pc_relative */
  509. 8, /* bitpos */
  510. complain_overflow_dont,/* complain_on_overflow */
  511. bfd_elf_generic_reloc, /* special_function */
  512. "R_ARM_ALU_PCREL_15_8",/* name */
  513. false, /* partial_inplace */
  514. 0x00000fff, /* src_mask */
  515. 0x00000fff, /* dst_mask */
  516. true), /* pcrel_offset */
  517. HOWTO (R_ARM_ALU_PCREL23_15, /* type */
  518. 0, /* rightshift */
  519. 2, /* size (0 = byte, 1 = short, 2 = long) */
  520. 12, /* bitsize */
  521. true, /* pc_relative */
  522. 16, /* bitpos */
  523. complain_overflow_dont,/* complain_on_overflow */
  524. bfd_elf_generic_reloc, /* special_function */
  525. "R_ARM_ALU_PCREL_23_15",/* name */
  526. false, /* partial_inplace */
  527. 0x00000fff, /* src_mask */
  528. 0x00000fff, /* dst_mask */
  529. true), /* pcrel_offset */
  530. HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
  531. 0, /* rightshift */
  532. 2, /* size (0 = byte, 1 = short, 2 = long) */
  533. 12, /* bitsize */
  534. false, /* pc_relative */
  535. 0, /* bitpos */
  536. complain_overflow_dont,/* complain_on_overflow */
  537. bfd_elf_generic_reloc, /* special_function */
  538. "R_ARM_LDR_SBREL_11_0",/* name */
  539. false, /* partial_inplace */
  540. 0x00000fff, /* src_mask */
  541. 0x00000fff, /* dst_mask */
  542. false), /* pcrel_offset */
  543. HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
  544. 0, /* rightshift */
  545. 2, /* size (0 = byte, 1 = short, 2 = long) */
  546. 8, /* bitsize */
  547. false, /* pc_relative */
  548. 12, /* bitpos */
  549. complain_overflow_dont,/* complain_on_overflow */
  550. bfd_elf_generic_reloc, /* special_function */
  551. "R_ARM_ALU_SBREL_19_12",/* name */
  552. false, /* partial_inplace */
  553. 0x000ff000, /* src_mask */
  554. 0x000ff000, /* dst_mask */
  555. false), /* pcrel_offset */
  556. HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
  557. 0, /* rightshift */
  558. 2, /* size (0 = byte, 1 = short, 2 = long) */
  559. 8, /* bitsize */
  560. false, /* pc_relative */
  561. 20, /* bitpos */
  562. complain_overflow_dont,/* complain_on_overflow */
  563. bfd_elf_generic_reloc, /* special_function */
  564. "R_ARM_ALU_SBREL_27_20",/* name */
  565. false, /* partial_inplace */
  566. 0x0ff00000, /* src_mask */
  567. 0x0ff00000, /* dst_mask */
  568. false), /* pcrel_offset */
  569. HOWTO (R_ARM_TARGET1, /* type */
  570. 0, /* rightshift */
  571. 2, /* size (0 = byte, 1 = short, 2 = long) */
  572. 32, /* bitsize */
  573. false, /* pc_relative */
  574. 0, /* bitpos */
  575. complain_overflow_dont,/* complain_on_overflow */
  576. bfd_elf_generic_reloc, /* special_function */
  577. "R_ARM_TARGET1", /* name */
  578. false, /* partial_inplace */
  579. 0xffffffff, /* src_mask */
  580. 0xffffffff, /* dst_mask */
  581. false), /* pcrel_offset */
  582. HOWTO (R_ARM_ROSEGREL32, /* type */
  583. 0, /* rightshift */
  584. 2, /* size (0 = byte, 1 = short, 2 = long) */
  585. 32, /* bitsize */
  586. false, /* pc_relative */
  587. 0, /* bitpos */
  588. complain_overflow_dont,/* complain_on_overflow */
  589. bfd_elf_generic_reloc, /* special_function */
  590. "R_ARM_ROSEGREL32", /* name */
  591. false, /* partial_inplace */
  592. 0xffffffff, /* src_mask */
  593. 0xffffffff, /* dst_mask */
  594. false), /* pcrel_offset */
  595. HOWTO (R_ARM_V4BX, /* type */
  596. 0, /* rightshift */
  597. 2, /* size (0 = byte, 1 = short, 2 = long) */
  598. 32, /* bitsize */
  599. false, /* pc_relative */
  600. 0, /* bitpos */
  601. complain_overflow_dont,/* complain_on_overflow */
  602. bfd_elf_generic_reloc, /* special_function */
  603. "R_ARM_V4BX", /* name */
  604. false, /* partial_inplace */
  605. 0xffffffff, /* src_mask */
  606. 0xffffffff, /* dst_mask */
  607. false), /* pcrel_offset */
  608. HOWTO (R_ARM_TARGET2, /* type */
  609. 0, /* rightshift */
  610. 2, /* size (0 = byte, 1 = short, 2 = long) */
  611. 32, /* bitsize */
  612. false, /* pc_relative */
  613. 0, /* bitpos */
  614. complain_overflow_signed,/* complain_on_overflow */
  615. bfd_elf_generic_reloc, /* special_function */
  616. "R_ARM_TARGET2", /* name */
  617. false, /* partial_inplace */
  618. 0xffffffff, /* src_mask */
  619. 0xffffffff, /* dst_mask */
  620. true), /* pcrel_offset */
  621. HOWTO (R_ARM_PREL31, /* type */
  622. 0, /* rightshift */
  623. 2, /* size (0 = byte, 1 = short, 2 = long) */
  624. 31, /* bitsize */
  625. true, /* pc_relative */
  626. 0, /* bitpos */
  627. complain_overflow_signed,/* complain_on_overflow */
  628. bfd_elf_generic_reloc, /* special_function */
  629. "R_ARM_PREL31", /* name */
  630. false, /* partial_inplace */
  631. 0x7fffffff, /* src_mask */
  632. 0x7fffffff, /* dst_mask */
  633. true), /* pcrel_offset */
  634. HOWTO (R_ARM_MOVW_ABS_NC, /* type */
  635. 0, /* rightshift */
  636. 2, /* size (0 = byte, 1 = short, 2 = long) */
  637. 16, /* bitsize */
  638. false, /* pc_relative */
  639. 0, /* bitpos */
  640. complain_overflow_dont,/* complain_on_overflow */
  641. bfd_elf_generic_reloc, /* special_function */
  642. "R_ARM_MOVW_ABS_NC", /* name */
  643. false, /* partial_inplace */
  644. 0x000f0fff, /* src_mask */
  645. 0x000f0fff, /* dst_mask */
  646. false), /* pcrel_offset */
  647. HOWTO (R_ARM_MOVT_ABS, /* type */
  648. 0, /* rightshift */
  649. 2, /* size (0 = byte, 1 = short, 2 = long) */
  650. 16, /* bitsize */
  651. false, /* pc_relative */
  652. 0, /* bitpos */
  653. complain_overflow_bitfield,/* complain_on_overflow */
  654. bfd_elf_generic_reloc, /* special_function */
  655. "R_ARM_MOVT_ABS", /* name */
  656. false, /* partial_inplace */
  657. 0x000f0fff, /* src_mask */
  658. 0x000f0fff, /* dst_mask */
  659. false), /* pcrel_offset */
  660. HOWTO (R_ARM_MOVW_PREL_NC, /* type */
  661. 0, /* rightshift */
  662. 2, /* size (0 = byte, 1 = short, 2 = long) */
  663. 16, /* bitsize */
  664. true, /* pc_relative */
  665. 0, /* bitpos */
  666. complain_overflow_dont,/* complain_on_overflow */
  667. bfd_elf_generic_reloc, /* special_function */
  668. "R_ARM_MOVW_PREL_NC", /* name */
  669. false, /* partial_inplace */
  670. 0x000f0fff, /* src_mask */
  671. 0x000f0fff, /* dst_mask */
  672. true), /* pcrel_offset */
  673. HOWTO (R_ARM_MOVT_PREL, /* type */
  674. 0, /* rightshift */
  675. 2, /* size (0 = byte, 1 = short, 2 = long) */
  676. 16, /* bitsize */
  677. true, /* pc_relative */
  678. 0, /* bitpos */
  679. complain_overflow_bitfield,/* complain_on_overflow */
  680. bfd_elf_generic_reloc, /* special_function */
  681. "R_ARM_MOVT_PREL", /* name */
  682. false, /* partial_inplace */
  683. 0x000f0fff, /* src_mask */
  684. 0x000f0fff, /* dst_mask */
  685. true), /* pcrel_offset */
  686. HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
  687. 0, /* rightshift */
  688. 2, /* size (0 = byte, 1 = short, 2 = long) */
  689. 16, /* bitsize */
  690. false, /* pc_relative */
  691. 0, /* bitpos */
  692. complain_overflow_dont,/* complain_on_overflow */
  693. bfd_elf_generic_reloc, /* special_function */
  694. "R_ARM_THM_MOVW_ABS_NC",/* name */
  695. false, /* partial_inplace */
  696. 0x040f70ff, /* src_mask */
  697. 0x040f70ff, /* dst_mask */
  698. false), /* pcrel_offset */
  699. HOWTO (R_ARM_THM_MOVT_ABS, /* type */
  700. 0, /* rightshift */
  701. 2, /* size (0 = byte, 1 = short, 2 = long) */
  702. 16, /* bitsize */
  703. false, /* pc_relative */
  704. 0, /* bitpos */
  705. complain_overflow_bitfield,/* complain_on_overflow */
  706. bfd_elf_generic_reloc, /* special_function */
  707. "R_ARM_THM_MOVT_ABS", /* name */
  708. false, /* partial_inplace */
  709. 0x040f70ff, /* src_mask */
  710. 0x040f70ff, /* dst_mask */
  711. false), /* pcrel_offset */
  712. HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
  713. 0, /* rightshift */
  714. 2, /* size (0 = byte, 1 = short, 2 = long) */
  715. 16, /* bitsize */
  716. true, /* pc_relative */
  717. 0, /* bitpos */
  718. complain_overflow_dont,/* complain_on_overflow */
  719. bfd_elf_generic_reloc, /* special_function */
  720. "R_ARM_THM_MOVW_PREL_NC",/* name */
  721. false, /* partial_inplace */
  722. 0x040f70ff, /* src_mask */
  723. 0x040f70ff, /* dst_mask */
  724. true), /* pcrel_offset */
  725. HOWTO (R_ARM_THM_MOVT_PREL, /* type */
  726. 0, /* rightshift */
  727. 2, /* size (0 = byte, 1 = short, 2 = long) */
  728. 16, /* bitsize */
  729. true, /* pc_relative */
  730. 0, /* bitpos */
  731. complain_overflow_bitfield,/* complain_on_overflow */
  732. bfd_elf_generic_reloc, /* special_function */
  733. "R_ARM_THM_MOVT_PREL", /* name */
  734. false, /* partial_inplace */
  735. 0x040f70ff, /* src_mask */
  736. 0x040f70ff, /* dst_mask */
  737. true), /* pcrel_offset */
  738. HOWTO (R_ARM_THM_JUMP19, /* type */
  739. 1, /* rightshift */
  740. 2, /* size (0 = byte, 1 = short, 2 = long) */
  741. 19, /* bitsize */
  742. true, /* pc_relative */
  743. 0, /* bitpos */
  744. complain_overflow_signed,/* complain_on_overflow */
  745. bfd_elf_generic_reloc, /* special_function */
  746. "R_ARM_THM_JUMP19", /* name */
  747. false, /* partial_inplace */
  748. 0x043f2fff, /* src_mask */
  749. 0x043f2fff, /* dst_mask */
  750. true), /* pcrel_offset */
  751. HOWTO (R_ARM_THM_JUMP6, /* type */
  752. 1, /* rightshift */
  753. 1, /* size (0 = byte, 1 = short, 2 = long) */
  754. 6, /* bitsize */
  755. true, /* pc_relative */
  756. 0, /* bitpos */
  757. complain_overflow_unsigned,/* complain_on_overflow */
  758. bfd_elf_generic_reloc, /* special_function */
  759. "R_ARM_THM_JUMP6", /* name */
  760. false, /* partial_inplace */
  761. 0x02f8, /* src_mask */
  762. 0x02f8, /* dst_mask */
  763. true), /* pcrel_offset */
  764. /* These are declared as 13-bit signed relocations because we can
  765. address -4095 .. 4095(base) by altering ADDW to SUBW or vice
  766. versa. */
  767. HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
  768. 0, /* rightshift */
  769. 2, /* size (0 = byte, 1 = short, 2 = long) */
  770. 13, /* bitsize */
  771. true, /* pc_relative */
  772. 0, /* bitpos */
  773. complain_overflow_dont,/* complain_on_overflow */
  774. bfd_elf_generic_reloc, /* special_function */
  775. "R_ARM_THM_ALU_PREL_11_0",/* name */
  776. false, /* partial_inplace */
  777. 0xffffffff, /* src_mask */
  778. 0xffffffff, /* dst_mask */
  779. true), /* pcrel_offset */
  780. HOWTO (R_ARM_THM_PC12, /* type */
  781. 0, /* rightshift */
  782. 2, /* size (0 = byte, 1 = short, 2 = long) */
  783. 13, /* bitsize */
  784. true, /* pc_relative */
  785. 0, /* bitpos */
  786. complain_overflow_dont,/* complain_on_overflow */
  787. bfd_elf_generic_reloc, /* special_function */
  788. "R_ARM_THM_PC12", /* name */
  789. false, /* partial_inplace */
  790. 0xffffffff, /* src_mask */
  791. 0xffffffff, /* dst_mask */
  792. true), /* pcrel_offset */
  793. HOWTO (R_ARM_ABS32_NOI, /* type */
  794. 0, /* rightshift */
  795. 2, /* size (0 = byte, 1 = short, 2 = long) */
  796. 32, /* bitsize */
  797. false, /* pc_relative */
  798. 0, /* bitpos */
  799. complain_overflow_dont,/* complain_on_overflow */
  800. bfd_elf_generic_reloc, /* special_function */
  801. "R_ARM_ABS32_NOI", /* name */
  802. false, /* partial_inplace */
  803. 0xffffffff, /* src_mask */
  804. 0xffffffff, /* dst_mask */
  805. false), /* pcrel_offset */
  806. HOWTO (R_ARM_REL32_NOI, /* type */
  807. 0, /* rightshift */
  808. 2, /* size (0 = byte, 1 = short, 2 = long) */
  809. 32, /* bitsize */
  810. true, /* pc_relative */
  811. 0, /* bitpos */
  812. complain_overflow_dont,/* complain_on_overflow */
  813. bfd_elf_generic_reloc, /* special_function */
  814. "R_ARM_REL32_NOI", /* name */
  815. false, /* partial_inplace */
  816. 0xffffffff, /* src_mask */
  817. 0xffffffff, /* dst_mask */
  818. false), /* pcrel_offset */
  819. /* Group relocations. */
  820. HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
  821. 0, /* rightshift */
  822. 2, /* size (0 = byte, 1 = short, 2 = long) */
  823. 32, /* bitsize */
  824. true, /* pc_relative */
  825. 0, /* bitpos */
  826. complain_overflow_dont,/* complain_on_overflow */
  827. bfd_elf_generic_reloc, /* special_function */
  828. "R_ARM_ALU_PC_G0_NC", /* name */
  829. false, /* partial_inplace */
  830. 0xffffffff, /* src_mask */
  831. 0xffffffff, /* dst_mask */
  832. true), /* pcrel_offset */
  833. HOWTO (R_ARM_ALU_PC_G0, /* type */
  834. 0, /* rightshift */
  835. 2, /* size (0 = byte, 1 = short, 2 = long) */
  836. 32, /* bitsize */
  837. true, /* pc_relative */
  838. 0, /* bitpos */
  839. complain_overflow_dont,/* complain_on_overflow */
  840. bfd_elf_generic_reloc, /* special_function */
  841. "R_ARM_ALU_PC_G0", /* name */
  842. false, /* partial_inplace */
  843. 0xffffffff, /* src_mask */
  844. 0xffffffff, /* dst_mask */
  845. true), /* pcrel_offset */
  846. HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
  847. 0, /* rightshift */
  848. 2, /* size (0 = byte, 1 = short, 2 = long) */
  849. 32, /* bitsize */
  850. true, /* pc_relative */
  851. 0, /* bitpos */
  852. complain_overflow_dont,/* complain_on_overflow */
  853. bfd_elf_generic_reloc, /* special_function */
  854. "R_ARM_ALU_PC_G1_NC", /* name */
  855. false, /* partial_inplace */
  856. 0xffffffff, /* src_mask */
  857. 0xffffffff, /* dst_mask */
  858. true), /* pcrel_offset */
  859. HOWTO (R_ARM_ALU_PC_G1, /* type */
  860. 0, /* rightshift */
  861. 2, /* size (0 = byte, 1 = short, 2 = long) */
  862. 32, /* bitsize */
  863. true, /* pc_relative */
  864. 0, /* bitpos */
  865. complain_overflow_dont,/* complain_on_overflow */
  866. bfd_elf_generic_reloc, /* special_function */
  867. "R_ARM_ALU_PC_G1", /* name */
  868. false, /* partial_inplace */
  869. 0xffffffff, /* src_mask */
  870. 0xffffffff, /* dst_mask */
  871. true), /* pcrel_offset */
  872. HOWTO (R_ARM_ALU_PC_G2, /* type */
  873. 0, /* rightshift */
  874. 2, /* size (0 = byte, 1 = short, 2 = long) */
  875. 32, /* bitsize */
  876. true, /* pc_relative */
  877. 0, /* bitpos */
  878. complain_overflow_dont,/* complain_on_overflow */
  879. bfd_elf_generic_reloc, /* special_function */
  880. "R_ARM_ALU_PC_G2", /* name */
  881. false, /* partial_inplace */
  882. 0xffffffff, /* src_mask */
  883. 0xffffffff, /* dst_mask */
  884. true), /* pcrel_offset */
  885. HOWTO (R_ARM_LDR_PC_G1, /* type */
  886. 0, /* rightshift */
  887. 2, /* size (0 = byte, 1 = short, 2 = long) */
  888. 32, /* bitsize */
  889. true, /* pc_relative */
  890. 0, /* bitpos */
  891. complain_overflow_dont,/* complain_on_overflow */
  892. bfd_elf_generic_reloc, /* special_function */
  893. "R_ARM_LDR_PC_G1", /* name */
  894. false, /* partial_inplace */
  895. 0xffffffff, /* src_mask */
  896. 0xffffffff, /* dst_mask */
  897. true), /* pcrel_offset */
  898. HOWTO (R_ARM_LDR_PC_G2, /* type */
  899. 0, /* rightshift */
  900. 2, /* size (0 = byte, 1 = short, 2 = long) */
  901. 32, /* bitsize */
  902. true, /* pc_relative */
  903. 0, /* bitpos */
  904. complain_overflow_dont,/* complain_on_overflow */
  905. bfd_elf_generic_reloc, /* special_function */
  906. "R_ARM_LDR_PC_G2", /* name */
  907. false, /* partial_inplace */
  908. 0xffffffff, /* src_mask */
  909. 0xffffffff, /* dst_mask */
  910. true), /* pcrel_offset */
  911. HOWTO (R_ARM_LDRS_PC_G0, /* type */
  912. 0, /* rightshift */
  913. 2, /* size (0 = byte, 1 = short, 2 = long) */
  914. 32, /* bitsize */
  915. true, /* pc_relative */
  916. 0, /* bitpos */
  917. complain_overflow_dont,/* complain_on_overflow */
  918. bfd_elf_generic_reloc, /* special_function */
  919. "R_ARM_LDRS_PC_G0", /* name */
  920. false, /* partial_inplace */
  921. 0xffffffff, /* src_mask */
  922. 0xffffffff, /* dst_mask */
  923. true), /* pcrel_offset */
  924. HOWTO (R_ARM_LDRS_PC_G1, /* type */
  925. 0, /* rightshift */
  926. 2, /* size (0 = byte, 1 = short, 2 = long) */
  927. 32, /* bitsize */
  928. true, /* pc_relative */
  929. 0, /* bitpos */
  930. complain_overflow_dont,/* complain_on_overflow */
  931. bfd_elf_generic_reloc, /* special_function */
  932. "R_ARM_LDRS_PC_G1", /* name */
  933. false, /* partial_inplace */
  934. 0xffffffff, /* src_mask */
  935. 0xffffffff, /* dst_mask */
  936. true), /* pcrel_offset */
  937. HOWTO (R_ARM_LDRS_PC_G2, /* type */
  938. 0, /* rightshift */
  939. 2, /* size (0 = byte, 1 = short, 2 = long) */
  940. 32, /* bitsize */
  941. true, /* pc_relative */
  942. 0, /* bitpos */
  943. complain_overflow_dont,/* complain_on_overflow */
  944. bfd_elf_generic_reloc, /* special_function */
  945. "R_ARM_LDRS_PC_G2", /* name */
  946. false, /* partial_inplace */
  947. 0xffffffff, /* src_mask */
  948. 0xffffffff, /* dst_mask */
  949. true), /* pcrel_offset */
  950. HOWTO (R_ARM_LDC_PC_G0, /* type */
  951. 0, /* rightshift */
  952. 2, /* size (0 = byte, 1 = short, 2 = long) */
  953. 32, /* bitsize */
  954. true, /* pc_relative */
  955. 0, /* bitpos */
  956. complain_overflow_dont,/* complain_on_overflow */
  957. bfd_elf_generic_reloc, /* special_function */
  958. "R_ARM_LDC_PC_G0", /* name */
  959. false, /* partial_inplace */
  960. 0xffffffff, /* src_mask */
  961. 0xffffffff, /* dst_mask */
  962. true), /* pcrel_offset */
  963. HOWTO (R_ARM_LDC_PC_G1, /* type */
  964. 0, /* rightshift */
  965. 2, /* size (0 = byte, 1 = short, 2 = long) */
  966. 32, /* bitsize */
  967. true, /* pc_relative */
  968. 0, /* bitpos */
  969. complain_overflow_dont,/* complain_on_overflow */
  970. bfd_elf_generic_reloc, /* special_function */
  971. "R_ARM_LDC_PC_G1", /* name */
  972. false, /* partial_inplace */
  973. 0xffffffff, /* src_mask */
  974. 0xffffffff, /* dst_mask */
  975. true), /* pcrel_offset */
  976. HOWTO (R_ARM_LDC_PC_G2, /* type */
  977. 0, /* rightshift */
  978. 2, /* size (0 = byte, 1 = short, 2 = long) */
  979. 32, /* bitsize */
  980. true, /* pc_relative */
  981. 0, /* bitpos */
  982. complain_overflow_dont,/* complain_on_overflow */
  983. bfd_elf_generic_reloc, /* special_function */
  984. "R_ARM_LDC_PC_G2", /* name */
  985. false, /* partial_inplace */
  986. 0xffffffff, /* src_mask */
  987. 0xffffffff, /* dst_mask */
  988. true), /* pcrel_offset */
  989. HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
  990. 0, /* rightshift */
  991. 2, /* size (0 = byte, 1 = short, 2 = long) */
  992. 32, /* bitsize */
  993. true, /* pc_relative */
  994. 0, /* bitpos */
  995. complain_overflow_dont,/* complain_on_overflow */
  996. bfd_elf_generic_reloc, /* special_function */
  997. "R_ARM_ALU_SB_G0_NC", /* name */
  998. false, /* partial_inplace */
  999. 0xffffffff, /* src_mask */
  1000. 0xffffffff, /* dst_mask */
  1001. true), /* pcrel_offset */
  1002. HOWTO (R_ARM_ALU_SB_G0, /* type */
  1003. 0, /* rightshift */
  1004. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1005. 32, /* bitsize */
  1006. true, /* pc_relative */
  1007. 0, /* bitpos */
  1008. complain_overflow_dont,/* complain_on_overflow */
  1009. bfd_elf_generic_reloc, /* special_function */
  1010. "R_ARM_ALU_SB_G0", /* name */
  1011. false, /* partial_inplace */
  1012. 0xffffffff, /* src_mask */
  1013. 0xffffffff, /* dst_mask */
  1014. true), /* pcrel_offset */
  1015. HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
  1016. 0, /* rightshift */
  1017. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1018. 32, /* bitsize */
  1019. true, /* pc_relative */
  1020. 0, /* bitpos */
  1021. complain_overflow_dont,/* complain_on_overflow */
  1022. bfd_elf_generic_reloc, /* special_function */
  1023. "R_ARM_ALU_SB_G1_NC", /* name */
  1024. false, /* partial_inplace */
  1025. 0xffffffff, /* src_mask */
  1026. 0xffffffff, /* dst_mask */
  1027. true), /* pcrel_offset */
  1028. HOWTO (R_ARM_ALU_SB_G1, /* type */
  1029. 0, /* rightshift */
  1030. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1031. 32, /* bitsize */
  1032. true, /* pc_relative */
  1033. 0, /* bitpos */
  1034. complain_overflow_dont,/* complain_on_overflow */
  1035. bfd_elf_generic_reloc, /* special_function */
  1036. "R_ARM_ALU_SB_G1", /* name */
  1037. false, /* partial_inplace */
  1038. 0xffffffff, /* src_mask */
  1039. 0xffffffff, /* dst_mask */
  1040. true), /* pcrel_offset */
  1041. HOWTO (R_ARM_ALU_SB_G2, /* type */
  1042. 0, /* rightshift */
  1043. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1044. 32, /* bitsize */
  1045. true, /* pc_relative */
  1046. 0, /* bitpos */
  1047. complain_overflow_dont,/* complain_on_overflow */
  1048. bfd_elf_generic_reloc, /* special_function */
  1049. "R_ARM_ALU_SB_G2", /* name */
  1050. false, /* partial_inplace */
  1051. 0xffffffff, /* src_mask */
  1052. 0xffffffff, /* dst_mask */
  1053. true), /* pcrel_offset */
  1054. HOWTO (R_ARM_LDR_SB_G0, /* type */
  1055. 0, /* rightshift */
  1056. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1057. 32, /* bitsize */
  1058. true, /* pc_relative */
  1059. 0, /* bitpos */
  1060. complain_overflow_dont,/* complain_on_overflow */
  1061. bfd_elf_generic_reloc, /* special_function */
  1062. "R_ARM_LDR_SB_G0", /* name */
  1063. false, /* partial_inplace */
  1064. 0xffffffff, /* src_mask */
  1065. 0xffffffff, /* dst_mask */
  1066. true), /* pcrel_offset */
  1067. HOWTO (R_ARM_LDR_SB_G1, /* type */
  1068. 0, /* rightshift */
  1069. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1070. 32, /* bitsize */
  1071. true, /* pc_relative */
  1072. 0, /* bitpos */
  1073. complain_overflow_dont,/* complain_on_overflow */
  1074. bfd_elf_generic_reloc, /* special_function */
  1075. "R_ARM_LDR_SB_G1", /* name */
  1076. false, /* partial_inplace */
  1077. 0xffffffff, /* src_mask */
  1078. 0xffffffff, /* dst_mask */
  1079. true), /* pcrel_offset */
  1080. HOWTO (R_ARM_LDR_SB_G2, /* type */
  1081. 0, /* rightshift */
  1082. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1083. 32, /* bitsize */
  1084. true, /* pc_relative */
  1085. 0, /* bitpos */
  1086. complain_overflow_dont,/* complain_on_overflow */
  1087. bfd_elf_generic_reloc, /* special_function */
  1088. "R_ARM_LDR_SB_G2", /* name */
  1089. false, /* partial_inplace */
  1090. 0xffffffff, /* src_mask */
  1091. 0xffffffff, /* dst_mask */
  1092. true), /* pcrel_offset */
  1093. HOWTO (R_ARM_LDRS_SB_G0, /* type */
  1094. 0, /* rightshift */
  1095. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1096. 32, /* bitsize */
  1097. true, /* pc_relative */
  1098. 0, /* bitpos */
  1099. complain_overflow_dont,/* complain_on_overflow */
  1100. bfd_elf_generic_reloc, /* special_function */
  1101. "R_ARM_LDRS_SB_G0", /* name */
  1102. false, /* partial_inplace */
  1103. 0xffffffff, /* src_mask */
  1104. 0xffffffff, /* dst_mask */
  1105. true), /* pcrel_offset */
  1106. HOWTO (R_ARM_LDRS_SB_G1, /* type */
  1107. 0, /* rightshift */
  1108. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1109. 32, /* bitsize */
  1110. true, /* pc_relative */
  1111. 0, /* bitpos */
  1112. complain_overflow_dont,/* complain_on_overflow */
  1113. bfd_elf_generic_reloc, /* special_function */
  1114. "R_ARM_LDRS_SB_G1", /* name */
  1115. false, /* partial_inplace */
  1116. 0xffffffff, /* src_mask */
  1117. 0xffffffff, /* dst_mask */
  1118. true), /* pcrel_offset */
  1119. HOWTO (R_ARM_LDRS_SB_G2, /* type */
  1120. 0, /* rightshift */
  1121. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1122. 32, /* bitsize */
  1123. true, /* pc_relative */
  1124. 0, /* bitpos */
  1125. complain_overflow_dont,/* complain_on_overflow */
  1126. bfd_elf_generic_reloc, /* special_function */
  1127. "R_ARM_LDRS_SB_G2", /* name */
  1128. false, /* partial_inplace */
  1129. 0xffffffff, /* src_mask */
  1130. 0xffffffff, /* dst_mask */
  1131. true), /* pcrel_offset */
  1132. HOWTO (R_ARM_LDC_SB_G0, /* type */
  1133. 0, /* rightshift */
  1134. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1135. 32, /* bitsize */
  1136. true, /* pc_relative */
  1137. 0, /* bitpos */
  1138. complain_overflow_dont,/* complain_on_overflow */
  1139. bfd_elf_generic_reloc, /* special_function */
  1140. "R_ARM_LDC_SB_G0", /* name */
  1141. false, /* partial_inplace */
  1142. 0xffffffff, /* src_mask */
  1143. 0xffffffff, /* dst_mask */
  1144. true), /* pcrel_offset */
  1145. HOWTO (R_ARM_LDC_SB_G1, /* type */
  1146. 0, /* rightshift */
  1147. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1148. 32, /* bitsize */
  1149. true, /* pc_relative */
  1150. 0, /* bitpos */
  1151. complain_overflow_dont,/* complain_on_overflow */
  1152. bfd_elf_generic_reloc, /* special_function */
  1153. "R_ARM_LDC_SB_G1", /* name */
  1154. false, /* partial_inplace */
  1155. 0xffffffff, /* src_mask */
  1156. 0xffffffff, /* dst_mask */
  1157. true), /* pcrel_offset */
  1158. HOWTO (R_ARM_LDC_SB_G2, /* type */
  1159. 0, /* rightshift */
  1160. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1161. 32, /* bitsize */
  1162. true, /* pc_relative */
  1163. 0, /* bitpos */
  1164. complain_overflow_dont,/* complain_on_overflow */
  1165. bfd_elf_generic_reloc, /* special_function */
  1166. "R_ARM_LDC_SB_G2", /* name */
  1167. false, /* partial_inplace */
  1168. 0xffffffff, /* src_mask */
  1169. 0xffffffff, /* dst_mask */
  1170. true), /* pcrel_offset */
  1171. /* End of group relocations. */
  1172. HOWTO (R_ARM_MOVW_BREL_NC, /* type */
  1173. 0, /* rightshift */
  1174. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1175. 16, /* bitsize */
  1176. false, /* pc_relative */
  1177. 0, /* bitpos */
  1178. complain_overflow_dont,/* complain_on_overflow */
  1179. bfd_elf_generic_reloc, /* special_function */
  1180. "R_ARM_MOVW_BREL_NC", /* name */
  1181. false, /* partial_inplace */
  1182. 0x0000ffff, /* src_mask */
  1183. 0x0000ffff, /* dst_mask */
  1184. false), /* pcrel_offset */
  1185. HOWTO (R_ARM_MOVT_BREL, /* type */
  1186. 0, /* rightshift */
  1187. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1188. 16, /* bitsize */
  1189. false, /* pc_relative */
  1190. 0, /* bitpos */
  1191. complain_overflow_bitfield,/* complain_on_overflow */
  1192. bfd_elf_generic_reloc, /* special_function */
  1193. "R_ARM_MOVT_BREL", /* name */
  1194. false, /* partial_inplace */
  1195. 0x0000ffff, /* src_mask */
  1196. 0x0000ffff, /* dst_mask */
  1197. false), /* pcrel_offset */
  1198. HOWTO (R_ARM_MOVW_BREL, /* type */
  1199. 0, /* rightshift */
  1200. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1201. 16, /* bitsize */
  1202. false, /* pc_relative */
  1203. 0, /* bitpos */
  1204. complain_overflow_dont,/* complain_on_overflow */
  1205. bfd_elf_generic_reloc, /* special_function */
  1206. "R_ARM_MOVW_BREL", /* name */
  1207. false, /* partial_inplace */
  1208. 0x0000ffff, /* src_mask */
  1209. 0x0000ffff, /* dst_mask */
  1210. false), /* pcrel_offset */
  1211. HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
  1212. 0, /* rightshift */
  1213. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1214. 16, /* bitsize */
  1215. false, /* pc_relative */
  1216. 0, /* bitpos */
  1217. complain_overflow_dont,/* complain_on_overflow */
  1218. bfd_elf_generic_reloc, /* special_function */
  1219. "R_ARM_THM_MOVW_BREL_NC",/* name */
  1220. false, /* partial_inplace */
  1221. 0x040f70ff, /* src_mask */
  1222. 0x040f70ff, /* dst_mask */
  1223. false), /* pcrel_offset */
  1224. HOWTO (R_ARM_THM_MOVT_BREL, /* type */
  1225. 0, /* rightshift */
  1226. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1227. 16, /* bitsize */
  1228. false, /* pc_relative */
  1229. 0, /* bitpos */
  1230. complain_overflow_bitfield,/* complain_on_overflow */
  1231. bfd_elf_generic_reloc, /* special_function */
  1232. "R_ARM_THM_MOVT_BREL", /* name */
  1233. false, /* partial_inplace */
  1234. 0x040f70ff, /* src_mask */
  1235. 0x040f70ff, /* dst_mask */
  1236. false), /* pcrel_offset */
  1237. HOWTO (R_ARM_THM_MOVW_BREL, /* type */
  1238. 0, /* rightshift */
  1239. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1240. 16, /* bitsize */
  1241. false, /* pc_relative */
  1242. 0, /* bitpos */
  1243. complain_overflow_dont,/* complain_on_overflow */
  1244. bfd_elf_generic_reloc, /* special_function */
  1245. "R_ARM_THM_MOVW_BREL", /* name */
  1246. false, /* partial_inplace */
  1247. 0x040f70ff, /* src_mask */
  1248. 0x040f70ff, /* dst_mask */
  1249. false), /* pcrel_offset */
  1250. HOWTO (R_ARM_TLS_GOTDESC, /* type */
  1251. 0, /* rightshift */
  1252. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1253. 32, /* bitsize */
  1254. false, /* pc_relative */
  1255. 0, /* bitpos */
  1256. complain_overflow_bitfield,/* complain_on_overflow */
  1257. NULL, /* special_function */
  1258. "R_ARM_TLS_GOTDESC", /* name */
  1259. true, /* partial_inplace */
  1260. 0xffffffff, /* src_mask */
  1261. 0xffffffff, /* dst_mask */
  1262. false), /* pcrel_offset */
  1263. HOWTO (R_ARM_TLS_CALL, /* type */
  1264. 0, /* rightshift */
  1265. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1266. 24, /* bitsize */
  1267. false, /* pc_relative */
  1268. 0, /* bitpos */
  1269. complain_overflow_dont,/* complain_on_overflow */
  1270. bfd_elf_generic_reloc, /* special_function */
  1271. "R_ARM_TLS_CALL", /* name */
  1272. false, /* partial_inplace */
  1273. 0x00ffffff, /* src_mask */
  1274. 0x00ffffff, /* dst_mask */
  1275. false), /* pcrel_offset */
  1276. HOWTO (R_ARM_TLS_DESCSEQ, /* type */
  1277. 0, /* rightshift */
  1278. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1279. 0, /* bitsize */
  1280. false, /* pc_relative */
  1281. 0, /* bitpos */
  1282. complain_overflow_dont,/* complain_on_overflow */
  1283. bfd_elf_generic_reloc, /* special_function */
  1284. "R_ARM_TLS_DESCSEQ", /* name */
  1285. false, /* partial_inplace */
  1286. 0x00000000, /* src_mask */
  1287. 0x00000000, /* dst_mask */
  1288. false), /* pcrel_offset */
  1289. HOWTO (R_ARM_THM_TLS_CALL, /* type */
  1290. 0, /* rightshift */
  1291. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1292. 24, /* bitsize */
  1293. false, /* pc_relative */
  1294. 0, /* bitpos */
  1295. complain_overflow_dont,/* complain_on_overflow */
  1296. bfd_elf_generic_reloc, /* special_function */
  1297. "R_ARM_THM_TLS_CALL", /* name */
  1298. false, /* partial_inplace */
  1299. 0x07ff07ff, /* src_mask */
  1300. 0x07ff07ff, /* dst_mask */
  1301. false), /* pcrel_offset */
  1302. HOWTO (R_ARM_PLT32_ABS, /* type */
  1303. 0, /* rightshift */
  1304. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1305. 32, /* bitsize */
  1306. false, /* pc_relative */
  1307. 0, /* bitpos */
  1308. complain_overflow_dont,/* complain_on_overflow */
  1309. bfd_elf_generic_reloc, /* special_function */
  1310. "R_ARM_PLT32_ABS", /* name */
  1311. false, /* partial_inplace */
  1312. 0xffffffff, /* src_mask */
  1313. 0xffffffff, /* dst_mask */
  1314. false), /* pcrel_offset */
  1315. HOWTO (R_ARM_GOT_ABS, /* type */
  1316. 0, /* rightshift */
  1317. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1318. 32, /* bitsize */
  1319. false, /* pc_relative */
  1320. 0, /* bitpos */
  1321. complain_overflow_dont,/* complain_on_overflow */
  1322. bfd_elf_generic_reloc, /* special_function */
  1323. "R_ARM_GOT_ABS", /* name */
  1324. false, /* partial_inplace */
  1325. 0xffffffff, /* src_mask */
  1326. 0xffffffff, /* dst_mask */
  1327. false), /* pcrel_offset */
  1328. HOWTO (R_ARM_GOT_PREL, /* type */
  1329. 0, /* rightshift */
  1330. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1331. 32, /* bitsize */
  1332. true, /* pc_relative */
  1333. 0, /* bitpos */
  1334. complain_overflow_dont, /* complain_on_overflow */
  1335. bfd_elf_generic_reloc, /* special_function */
  1336. "R_ARM_GOT_PREL", /* name */
  1337. false, /* partial_inplace */
  1338. 0xffffffff, /* src_mask */
  1339. 0xffffffff, /* dst_mask */
  1340. true), /* pcrel_offset */
  1341. HOWTO (R_ARM_GOT_BREL12, /* type */
  1342. 0, /* rightshift */
  1343. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1344. 12, /* bitsize */
  1345. false, /* pc_relative */
  1346. 0, /* bitpos */
  1347. complain_overflow_bitfield,/* complain_on_overflow */
  1348. bfd_elf_generic_reloc, /* special_function */
  1349. "R_ARM_GOT_BREL12", /* name */
  1350. false, /* partial_inplace */
  1351. 0x00000fff, /* src_mask */
  1352. 0x00000fff, /* dst_mask */
  1353. false), /* pcrel_offset */
  1354. HOWTO (R_ARM_GOTOFF12, /* type */
  1355. 0, /* rightshift */
  1356. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1357. 12, /* bitsize */
  1358. false, /* pc_relative */
  1359. 0, /* bitpos */
  1360. complain_overflow_bitfield,/* complain_on_overflow */
  1361. bfd_elf_generic_reloc, /* special_function */
  1362. "R_ARM_GOTOFF12", /* name */
  1363. false, /* partial_inplace */
  1364. 0x00000fff, /* src_mask */
  1365. 0x00000fff, /* dst_mask */
  1366. false), /* pcrel_offset */
  1367. EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
  1368. /* GNU extension to record C++ vtable member usage */
  1369. HOWTO (R_ARM_GNU_VTENTRY, /* type */
  1370. 0, /* rightshift */
  1371. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1372. 0, /* bitsize */
  1373. false, /* pc_relative */
  1374. 0, /* bitpos */
  1375. complain_overflow_dont, /* complain_on_overflow */
  1376. _bfd_elf_rel_vtable_reloc_fn, /* special_function */
  1377. "R_ARM_GNU_VTENTRY", /* name */
  1378. false, /* partial_inplace */
  1379. 0, /* src_mask */
  1380. 0, /* dst_mask */
  1381. false), /* pcrel_offset */
  1382. /* GNU extension to record C++ vtable hierarchy */
  1383. HOWTO (R_ARM_GNU_VTINHERIT, /* type */
  1384. 0, /* rightshift */
  1385. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1386. 0, /* bitsize */
  1387. false, /* pc_relative */
  1388. 0, /* bitpos */
  1389. complain_overflow_dont, /* complain_on_overflow */
  1390. NULL, /* special_function */
  1391. "R_ARM_GNU_VTINHERIT", /* name */
  1392. false, /* partial_inplace */
  1393. 0, /* src_mask */
  1394. 0, /* dst_mask */
  1395. false), /* pcrel_offset */
  1396. HOWTO (R_ARM_THM_JUMP11, /* type */
  1397. 1, /* rightshift */
  1398. 1, /* size (0 = byte, 1 = short, 2 = long) */
  1399. 11, /* bitsize */
  1400. true, /* pc_relative */
  1401. 0, /* bitpos */
  1402. complain_overflow_signed, /* complain_on_overflow */
  1403. bfd_elf_generic_reloc, /* special_function */
  1404. "R_ARM_THM_JUMP11", /* name */
  1405. false, /* partial_inplace */
  1406. 0x000007ff, /* src_mask */
  1407. 0x000007ff, /* dst_mask */
  1408. true), /* pcrel_offset */
  1409. HOWTO (R_ARM_THM_JUMP8, /* type */
  1410. 1, /* rightshift */
  1411. 1, /* size (0 = byte, 1 = short, 2 = long) */
  1412. 8, /* bitsize */
  1413. true, /* pc_relative */
  1414. 0, /* bitpos */
  1415. complain_overflow_signed, /* complain_on_overflow */
  1416. bfd_elf_generic_reloc, /* special_function */
  1417. "R_ARM_THM_JUMP8", /* name */
  1418. false, /* partial_inplace */
  1419. 0x000000ff, /* src_mask */
  1420. 0x000000ff, /* dst_mask */
  1421. true), /* pcrel_offset */
  1422. /* TLS relocations */
  1423. HOWTO (R_ARM_TLS_GD32, /* type */
  1424. 0, /* rightshift */
  1425. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1426. 32, /* bitsize */
  1427. false, /* pc_relative */
  1428. 0, /* bitpos */
  1429. complain_overflow_bitfield,/* complain_on_overflow */
  1430. NULL, /* special_function */
  1431. "R_ARM_TLS_GD32", /* name */
  1432. true, /* partial_inplace */
  1433. 0xffffffff, /* src_mask */
  1434. 0xffffffff, /* dst_mask */
  1435. false), /* pcrel_offset */
  1436. HOWTO (R_ARM_TLS_LDM32, /* type */
  1437. 0, /* rightshift */
  1438. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1439. 32, /* bitsize */
  1440. false, /* pc_relative */
  1441. 0, /* bitpos */
  1442. complain_overflow_bitfield,/* complain_on_overflow */
  1443. bfd_elf_generic_reloc, /* special_function */
  1444. "R_ARM_TLS_LDM32", /* name */
  1445. true, /* partial_inplace */
  1446. 0xffffffff, /* src_mask */
  1447. 0xffffffff, /* dst_mask */
  1448. false), /* pcrel_offset */
  1449. HOWTO (R_ARM_TLS_LDO32, /* type */
  1450. 0, /* rightshift */
  1451. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1452. 32, /* bitsize */
  1453. false, /* pc_relative */
  1454. 0, /* bitpos */
  1455. complain_overflow_bitfield,/* complain_on_overflow */
  1456. bfd_elf_generic_reloc, /* special_function */
  1457. "R_ARM_TLS_LDO32", /* name */
  1458. true, /* partial_inplace */
  1459. 0xffffffff, /* src_mask */
  1460. 0xffffffff, /* dst_mask */
  1461. false), /* pcrel_offset */
  1462. HOWTO (R_ARM_TLS_IE32, /* type */
  1463. 0, /* rightshift */
  1464. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1465. 32, /* bitsize */
  1466. false, /* pc_relative */
  1467. 0, /* bitpos */
  1468. complain_overflow_bitfield,/* complain_on_overflow */
  1469. NULL, /* special_function */
  1470. "R_ARM_TLS_IE32", /* name */
  1471. true, /* partial_inplace */
  1472. 0xffffffff, /* src_mask */
  1473. 0xffffffff, /* dst_mask */
  1474. false), /* pcrel_offset */
  1475. HOWTO (R_ARM_TLS_LE32, /* type */
  1476. 0, /* rightshift */
  1477. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1478. 32, /* bitsize */
  1479. false, /* pc_relative */
  1480. 0, /* bitpos */
  1481. complain_overflow_bitfield,/* complain_on_overflow */
  1482. NULL, /* special_function */
  1483. "R_ARM_TLS_LE32", /* name */
  1484. true, /* partial_inplace */
  1485. 0xffffffff, /* src_mask */
  1486. 0xffffffff, /* dst_mask */
  1487. false), /* pcrel_offset */
  1488. HOWTO (R_ARM_TLS_LDO12, /* type */
  1489. 0, /* rightshift */
  1490. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1491. 12, /* bitsize */
  1492. false, /* pc_relative */
  1493. 0, /* bitpos */
  1494. complain_overflow_bitfield,/* complain_on_overflow */
  1495. bfd_elf_generic_reloc, /* special_function */
  1496. "R_ARM_TLS_LDO12", /* name */
  1497. false, /* partial_inplace */
  1498. 0x00000fff, /* src_mask */
  1499. 0x00000fff, /* dst_mask */
  1500. false), /* pcrel_offset */
  1501. HOWTO (R_ARM_TLS_LE12, /* type */
  1502. 0, /* rightshift */
  1503. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1504. 12, /* bitsize */
  1505. false, /* pc_relative */
  1506. 0, /* bitpos */
  1507. complain_overflow_bitfield,/* complain_on_overflow */
  1508. bfd_elf_generic_reloc, /* special_function */
  1509. "R_ARM_TLS_LE12", /* name */
  1510. false, /* partial_inplace */
  1511. 0x00000fff, /* src_mask */
  1512. 0x00000fff, /* dst_mask */
  1513. false), /* pcrel_offset */
  1514. HOWTO (R_ARM_TLS_IE12GP, /* type */
  1515. 0, /* rightshift */
  1516. 2, /* size (0 = byte, 1 = short, 2 = long) */
  1517. 12, /* bitsize */
  1518. false, /* pc_relative */
  1519. 0, /* bitpos */
  1520. complain_overflow_bitfield,/* complain_on_overflow */
  1521. bfd_elf_generic_reloc, /* special_function */
  1522. "R_ARM_TLS_IE12GP", /* name */
  1523. false, /* partial_inplace */
  1524. 0x00000fff, /* src_mask */
  1525. 0x00000fff, /* dst_mask */
  1526. false), /* pcrel_offset */
  1527. /* 112-127 private relocations. */
  1528. EMPTY_HOWTO (112),
  1529. EMPTY_HOWTO (113),
  1530. EMPTY_HOWTO (114),
  1531. EMPTY_HOWTO (115),
  1532. EMPTY_HOWTO (116),
  1533. EMPTY_HOWTO (117),
  1534. EMPTY_HOWTO (118),
  1535. EMPTY_HOWTO (119),
  1536. EMPTY_HOWTO (120),
  1537. EMPTY_HOWTO (121),
  1538. EMPTY_HOWTO (122),
  1539. EMPTY_HOWTO (123),
  1540. EMPTY_HOWTO (124),
  1541. EMPTY_HOWTO (125),
  1542. EMPTY_HOWTO (126),
  1543. EMPTY_HOWTO (127),
  1544. /* R_ARM_ME_TOO, obsolete. */
  1545. EMPTY_HOWTO (128),
  1546. HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
  1547. 0, /* rightshift */
  1548. 1, /* size (0 = byte, 1 = short, 2 = long) */
  1549. 0, /* bitsize */
  1550. false, /* pc_relative */
  1551. 0, /* bitpos */
  1552. complain_overflow_dont,/* complain_on_overflow */
  1553. bfd_elf_generic_reloc, /* special_function */
  1554. "R_ARM_THM_TLS_DESCSEQ",/* name */
  1555. false, /* partial_inplace */
  1556. 0x00000000, /* src_mask */
  1557. 0x00000000, /* dst_mask */
  1558. false), /* pcrel_offset */
  1559. EMPTY_HOWTO (130),
  1560. EMPTY_HOWTO (131),
  1561. HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
  1562. 0, /* rightshift. */
  1563. 1, /* size (0 = byte, 1 = short, 2 = long). */
  1564. 16, /* bitsize. */
  1565. false, /* pc_relative. */
  1566. 0, /* bitpos. */
  1567. complain_overflow_bitfield,/* complain_on_overflow. */
  1568. bfd_elf_generic_reloc, /* special_function. */
  1569. "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
  1570. false, /* partial_inplace. */
  1571. 0x00000000, /* src_mask. */
  1572. 0x00000000, /* dst_mask. */
  1573. false), /* pcrel_offset. */
  1574. HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
  1575. 0, /* rightshift. */
  1576. 1, /* size (0 = byte, 1 = short, 2 = long). */
  1577. 16, /* bitsize. */
  1578. false, /* pc_relative. */
  1579. 0, /* bitpos. */
  1580. complain_overflow_bitfield,/* complain_on_overflow. */
  1581. bfd_elf_generic_reloc, /* special_function. */
  1582. "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
  1583. false, /* partial_inplace. */
  1584. 0x00000000, /* src_mask. */
  1585. 0x00000000, /* dst_mask. */
  1586. false), /* pcrel_offset. */
  1587. HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
  1588. 0, /* rightshift. */
  1589. 1, /* size (0 = byte, 1 = short, 2 = long). */
  1590. 16, /* bitsize. */
  1591. false, /* pc_relative. */
  1592. 0, /* bitpos. */
  1593. complain_overflow_bitfield,/* complain_on_overflow. */
  1594. bfd_elf_generic_reloc, /* special_function. */
  1595. "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
  1596. false, /* partial_inplace. */
  1597. 0x00000000, /* src_mask. */
  1598. 0x00000000, /* dst_mask. */
  1599. false), /* pcrel_offset. */
  1600. HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
  1601. 0, /* rightshift. */
  1602. 1, /* size (0 = byte, 1 = short, 2 = long). */
  1603. 16, /* bitsize. */
  1604. false, /* pc_relative. */
  1605. 0, /* bitpos. */
  1606. complain_overflow_bitfield,/* complain_on_overflow. */
  1607. bfd_elf_generic_reloc, /* special_function. */
  1608. "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
  1609. false, /* partial_inplace. */
  1610. 0x00000000, /* src_mask. */
  1611. 0x00000000, /* dst_mask. */
  1612. false), /* pcrel_offset. */
  1613. /* Relocations for Armv8.1-M Mainline. */
  1614. HOWTO (R_ARM_THM_BF16, /* type. */
  1615. 0, /* rightshift. */
  1616. 1, /* size (0 = byte, 1 = short, 2 = long). */
  1617. 16, /* bitsize. */
  1618. true, /* pc_relative. */
  1619. 0, /* bitpos. */
  1620. complain_overflow_dont,/* do not complain_on_overflow. */
  1621. bfd_elf_generic_reloc, /* special_function. */
  1622. "R_ARM_THM_BF16", /* name. */
  1623. false, /* partial_inplace. */
  1624. 0x001f0ffe, /* src_mask. */
  1625. 0x001f0ffe, /* dst_mask. */
  1626. true), /* pcrel_offset. */
  1627. HOWTO (R_ARM_THM_BF12, /* type. */
  1628. 0, /* rightshift. */
  1629. 1, /* size (0 = byte, 1 = short, 2 = long). */
  1630. 12, /* bitsize. */
  1631. true, /* pc_relative. */
  1632. 0, /* bitpos. */
  1633. complain_overflow_dont,/* do not complain_on_overflow. */
  1634. bfd_elf_generic_reloc, /* special_function. */
  1635. "R_ARM_THM_BF12", /* name. */
  1636. false, /* partial_inplace. */
  1637. 0x00010ffe, /* src_mask. */
  1638. 0x00010ffe, /* dst_mask. */
  1639. true), /* pcrel_offset. */
  1640. HOWTO (R_ARM_THM_BF18, /* type. */
  1641. 0, /* rightshift. */
  1642. 1, /* size (0 = byte, 1 = short, 2 = long). */
  1643. 18, /* bitsize. */
  1644. true, /* pc_relative. */
  1645. 0, /* bitpos. */
  1646. complain_overflow_dont,/* do not complain_on_overflow. */
  1647. bfd_elf_generic_reloc, /* special_function. */
  1648. "R_ARM_THM_BF18", /* name. */
  1649. false, /* partial_inplace. */
  1650. 0x007f0ffe, /* src_mask. */
  1651. 0x007f0ffe, /* dst_mask. */
  1652. true), /* pcrel_offset. */
  1653. };
/* Relocation types 160 onwards: R_ARM_IRELATIVE followed by the FDPIC
   function-descriptor and FDPIC TLS relocations.  Entries are indexed by
   (type - R_ARM_IRELATIVE); the table must therefore stay contiguous and
   in type order.  */
static reloc_howto_type elf32_arm_howto_table_2[8] =
{
  /* Reference to an STT_GNU_IFUNC resolver; the dynamic linker calls the
     resolver at load time and stores the result.  partial_inplace is true:
     the addend lives in the section contents.  */
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  /* FDPIC: GOT slot holding the address of a function descriptor.
     Note src_mask is 0 (addend comes from the reloc, not the section
     contents), unlike R_ARM_IRELATIVE above.  */
  HOWTO (R_ARM_GOTFUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTFUNCDESC",	/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  /* FDPIC: offset of a function descriptor's GOT slot from the GOT base.  */
  HOWTO (R_ARM_GOTOFFFUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTOFFFUNCDESC",/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  /* FDPIC: the address of a function descriptor itself.  */
  HOWTO (R_ARM_FUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC",	/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  /* FDPIC: the 64-bit (entry point, GOT pointer) descriptor contents;
     bitsize 64 covers both words even though the HOWTO size field is
     2 (long).  */
  HOWTO (R_ARM_FUNCDESC_VALUE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 64,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC_VALUE",/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  /* FDPIC variants of the TLS general-dynamic, local-dynamic and
     initial-exec GOT relocations.  */
  HOWTO (R_ARM_TLS_GD32_FDPIC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_GD32_FDPIC",/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_LDM32_FDPIC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_LDM32_FDPIC",/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_IE32_FDPIC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_IE32_FDPIC",/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
};
/* 249-255 extended, currently unused, relocations.  Only the last four
   (R_ARM_RREL32 .. R_ARM_RBASE) are tabulated here; all are legacy/dynamic
   relocations kept so that their names can be printed, with zero masks and
   zero size, i.e. they never modify section contents.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 false),		/* pcrel_offset */
  /* Last entry: no trailing comma after the closing paren.  */
  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 false)			/* pcrel_offset */
};
  1818. static reloc_howto_type *
  1819. elf32_arm_howto_from_type (unsigned int r_type)
  1820. {
  1821. if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
  1822. return &elf32_arm_howto_table_1[r_type];
  1823. if (r_type >= R_ARM_IRELATIVE
  1824. && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
  1825. return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
  1826. if (r_type >= R_ARM_RREL32
  1827. && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
  1828. return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
  1829. return NULL;
  1830. }
  1831. static bool
  1832. elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
  1833. Elf_Internal_Rela * elf_reloc)
  1834. {
  1835. unsigned int r_type;
  1836. r_type = ELF32_R_TYPE (elf_reloc->r_info);
  1837. if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
  1838. {
  1839. /* xgettext:c-format */
  1840. _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
  1841. abfd, r_type);
  1842. bfd_set_error (bfd_error_bad_value);
  1843. return false;
  1844. }
  1845. return true;
  1846. }
/* One row of the BFD-code -> ELF-relocation-number mapping table.  */
struct elf32_arm_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;  /* BFD-internal reloc code.  */
  unsigned char elf_reloc_val;		   /* Corresponding R_ARM_* number.  */
};
  1852. /* All entries in this list must also be present in elf32_arm_howto_table. */
  1853. static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
  1854. {
  1855. {BFD_RELOC_NONE, R_ARM_NONE},
  1856. {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
  1857. {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
  1858. {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
  1859. {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
  1860. {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
  1861. {BFD_RELOC_32, R_ARM_ABS32},
  1862. {BFD_RELOC_32_PCREL, R_ARM_REL32},
  1863. {BFD_RELOC_8, R_ARM_ABS8},
  1864. {BFD_RELOC_16, R_ARM_ABS16},
  1865. {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
  1866. {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
  1867. {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
  1868. {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
  1869. {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
  1870. {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
  1871. {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
  1872. {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
  1873. {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
  1874. {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
  1875. {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
  1876. {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
  1877. {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
  1878. {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
  1879. {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
  1880. {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
  1881. {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
  1882. {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
  1883. {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
  1884. {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
  1885. {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
  1886. {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
  1887. {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
  1888. {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
  1889. {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
  1890. {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
  1891. {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
  1892. {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
  1893. {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
  1894. {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
  1895. {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
  1896. {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
  1897. {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
  1898. {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
  1899. {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
  1900. {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
  1901. {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
  1902. {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
  1903. {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
  1904. {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
  1905. {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
  1906. {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
  1907. {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
  1908. {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
  1909. {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
  1910. {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
  1911. {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
  1912. {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
  1913. {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
  1914. {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
  1915. {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
  1916. {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
  1917. {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
  1918. {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
  1919. {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
  1920. {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
  1921. {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
  1922. {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
  1923. {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
  1924. {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
  1925. {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
  1926. {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
  1927. {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
  1928. {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
  1929. {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
  1930. {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
  1931. {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
  1932. {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
  1933. {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
  1934. {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
  1935. {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
  1936. {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
  1937. {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
  1938. {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
  1939. {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
  1940. {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
  1941. {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
  1942. {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
  1943. {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
  1944. {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
  1945. {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
  1946. {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
  1947. {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
  1948. {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
  1949. {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
  1950. {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
  1951. {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
  1952. {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
  1953. {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
  1954. {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
  1955. };
  1956. static reloc_howto_type *
  1957. elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
  1958. bfd_reloc_code_real_type code)
  1959. {
  1960. unsigned int i;
  1961. for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
  1962. if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
  1963. return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
  1964. return NULL;
  1965. }
  1966. static reloc_howto_type *
  1967. elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
  1968. const char *r_name)
  1969. {
  1970. unsigned int i;
  1971. for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
  1972. if (elf32_arm_howto_table_1[i].name != NULL
  1973. && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
  1974. return &elf32_arm_howto_table_1[i];
  1975. for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
  1976. if (elf32_arm_howto_table_2[i].name != NULL
  1977. && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
  1978. return &elf32_arm_howto_table_2[i];
  1979. for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
  1980. if (elf32_arm_howto_table_3[i].name != NULL
  1981. && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
  1982. return &elf32_arm_howto_table_3[i];
  1983. return NULL;
  1984. }
  1985. /* Support for core dump NOTE sections. */
  1986. static bool
  1987. elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
  1988. {
  1989. int offset;
  1990. size_t size;
  1991. switch (note->descsz)
  1992. {
  1993. default:
  1994. return false;
  1995. case 148: /* Linux/ARM 32-bit. */
  1996. /* pr_cursig */
  1997. elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
  1998. /* pr_pid */
  1999. elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
  2000. /* pr_reg */
  2001. offset = 72;
  2002. size = 72;
  2003. break;
  2004. }
  2005. /* Make a ".reg/999" section. */
  2006. return _bfd_elfcore_make_pseudosection (abfd, ".reg",
  2007. size, note->descpos + offset);
  2008. }
  2009. static bool
  2010. elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
  2011. {
  2012. switch (note->descsz)
  2013. {
  2014. default:
  2015. return false;
  2016. case 124: /* Linux/ARM elf_prpsinfo. */
  2017. elf_tdata (abfd)->core->pid
  2018. = bfd_get_32 (abfd, note->descdata + 12);
  2019. elf_tdata (abfd)->core->program
  2020. = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
  2021. elf_tdata (abfd)->core->command
  2022. = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
  2023. }
  2024. /* Note that for some reason, a spurious space is tacked
  2025. onto the end of the args in some (at least one anyway)
  2026. implementations, so strip it off if it exists. */
  2027. {
  2028. char *command = elf_tdata (abfd)->core->command;
  2029. int n = strlen (command);
  2030. if (0 < n && command[n - 1] == ' ')
  2031. command[n - 1] = '\0';
  2032. }
  2033. return true;
  2034. }
/* Write a Linux/ARM core-dump note of type NOTE_TYPE into BUF.
   The variadic arguments depend on the note type:
     NT_PRPSINFO: (const char *program, const char *command)
     NT_PRSTATUS: (long pid, int cursig, const void *gregs)
   The fixed offsets (12, 24, 28, 44, 72) mirror the layouts decoded
   by the grok_prstatus/grok_psinfo functions above.  Returns NULL for
   unsupported note types.  */
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124] ATTRIBUTE_NONSTRING;
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* pr_fname at offset 28 (16 bytes, may be unterminated).  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_PUSH;
	/* GCC 8.0 and 8.1 warn about 80 equals destination size with
	   -Wstringop-truncation:
	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
	 */
	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
#endif
	/* pr_psargs at offset 44 (80 bytes, may be unterminated).  */
	strncpy (data + 44, va_arg (ap, const char *), 80);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_POP;
#endif
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* pr_pid at offset 24.  */
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);
	/* pr_cursig at offset 12.  */
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	/* pr_reg at offset 72 (72 bytes of general registers).  */
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
/* BFD target vector names for the little- and big-endian ARM ELF
   targets.  */
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

/* Hook the core-note handlers defined above into the ELF backend.  */
#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

/* Container types for 32-bit ARM and 16-bit Thumb instruction words.  */
typedef unsigned long int	insn32;
typedef unsigned short int	insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
   || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
   || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found.  The
   name can be changed.  The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

/* Prefix applied to secure-entry functions for ARMv8-M CMSE.  */
#define CMSE_PREFIX "__acle_se_"

#define CMSE_STUB_NAME ".gnu.sgstubs"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"

/* FDPIC default stack size.  */
#define DEFAULT_STACK_SIZE 0x8000
/* TLS call trampoline: jump through the function pointer stored at
   [lr + r0 + 4].  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

/* Trampoline used for lazy TLS-descriptor resolution; tail-calls the
   resolver found through the GOT (the final two words are link-time
   constants patched in elsewhere).  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /*	push    {r2}			*/
  0xe59f200c, /*      ldr     r2, [pc, #3f - . - 8]	*/
  0xe59f100c, /*      ldr     r1, [pc, #4f - . - 8]	*/
  0xe79f2002, /* 1:   ldr     r2, [pc, r2]		*/
  0xe081100f, /* 2:   add     r1, pc			*/
  0xe12fff12, /*      bx      r2			*/
  0x00000014, /* 3:   .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
			      + dl_tlsdesc_lazy_resolver(GOT)	*/
  0x00000018, /* 4:   .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8	*/
};
/* NOTE: [Thumb nop sequence]
   When adding code that transitions from Thumb to Arm the instruction that
   should be used for the alignment padding should be 0xe7fd (b .-2) instead of
   a nop for performance reasons.  */

/* ARM FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_plt_entry [] =
{
  0xe59fc008,    /* ldr     r12, .L1 */
  0xe08cc009,    /* add     r12, r12, r9 */
  0xe59c9004,    /* ldr     r9, [r12, #4] */
  0xe59cf000,    /* ldr     pc, [r12] */
  0x00000000,    /* .L1.    .word   foo(GOTOFFFUNCDESC) */
  0x00000000,    /* .L2.    .word   foo(funcdesc_value_reloc_offset) */
  0xe51fc00c,    /* ldr     r12, [pc, #-12] */
  0xe92d1000,    /* push    {r12} */
  0xe599c004,    /* ldr     r12, [r9, #4] */
  0xe599f000,    /* ldr     pc, [r9] */
};

/* Thumb FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
{
  0xc00cf8df,    /* ldr.w   r12, .L1 */
  0x0c09eb0c,    /* add.w   r12, r12, r9 */
  0x9004f8dc,    /* ldr.w   r9, [r12, #4] */
  0xf000f8dc,    /* ldr.w   pc, [r12] */
  0x00000000,    /* .L1     .word   foo(GOTOFFFUNCDESC) */
  0x00000000,    /* .L2     .word   foo(funcdesc_value_reloc_offset) */
  0xc008f85f,    /* ldr.w   r12, .L2 */
  0xcd04f84d,    /* push    {r12} */
  0xc004f8d9,    /* ldr.w   r12, [r9, #4] */
  0xf000f8d9,    /* ldr.w   pc, [r9] */
};
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe010,		/* ldr   lr, [pc, #16]  */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
};

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
  0xe28fc600,		/* add   ip, pc, #NN	*/
  0xe28cca00,		/* add	 ip, ip, #NN	*/
  0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
  0x00000000,		/* unused		*/
};

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe004,		/* ldr   lr, [pc, #4]	*/
  0xe08fe00e,		/* add   lr, pc, lr	*/
  0xe5bef008,		/* ldr   pc, [lr, #8]!	*/
  0x00000000,		/* &GOT[0] - .		*/
};

/* By default subsequent entries in a procedure linkage table look like
   this.  Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
{
  0xe28fc600,		/* add   ip, pc, #0xNN00000 */
  0xe28cca00,		/* add	 ip, ip, #0xNN000   */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
};

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
{
  0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
  0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
  0xe28cca00,		/* add	 ip, ip, #0xNN000    */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
};

/* Selects between the short and long PLT entry formats above.  */
static bool elf32_arm_use_long_plt_entry = false;

#endif /* not FOUR_WORD_PLT */
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0xf8dfb500,		/* push	   {lr}		 */
  0x44fee008,		/* ldr.w   lr, [pc, #8]	 */
			/* add	   lr, pc	 */
  0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
  0x00000000,		/* &GOT[0] - .		 */
};

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0x0c00f240,		/* movw	   ip, #0xNNNN	  */
  0x0c00f2c0,		/* movt	   ip, #0xNNNN	  */
  0xf8dc44fc,		/* add	   ip, pc	  */
  0xe7fcf000		/* ldr.w   pc, [ip]	  */
			/* b	   .-4		  */
};
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
{
  0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
  0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
};

/* The format of subsequent entries in a VxWorks executable.
   Each entry is two fragments: the direct jump through the GOT slot,
   then the lazy-resolution fallback that branches to _PLT.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
{
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe59cf000,		/* ldr	  pc,[ip]			*/
  0x00000000,		/* .long  @got				*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xea000000,		/* b	  _PLT				*/
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
};

/* The format of entries in a VxWorks shared library.
   Like the executable form, but GOT accesses are indirected
   through r9.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
{
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe79cf009,		/* ldr	  pc,[ip,r9]			*/
  0x00000000,		/* .long  @got				*/
  0xe59fc000,		/* ldr	  ip,[pc]			*/
  0xe599f008,		/* ldr	  pc,[r9,#8]			*/
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
};
/* An initial stub used if the PLT entry is referenced from Thumb code.
   It switches to ARM state with "bx pc"; the 0xe7fd padding follows the
   [Thumb nop sequence] note above.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0xe7fd		/* b .-2 */
};
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  (NaCl requires 16-byte instruction bundles and masks
   indirect-branch targets with the bic instructions below.)  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
{
  /* First bundle: */
  0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xe52dc008,		/* str	ip, [sp, #-8]!			*/
  /* Second bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
  /* Third bundle: */
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  0xe320f000,		/* nop					*/
  /* .Lplt_tail: */
  0xe50dc004,		/* str	ip, [sp, #-4]			*/
  /* Fourth bundle: */
  0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
  0xe59cc000,		/* ldr	ip, [ip]			*/
  0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
  0xe12fff1c,		/* bx	ip				*/
};

/* Byte offset of the .Lplt_tail label within the entry above
   (the 12th word).  */
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
{
  0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
  0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
  0xe08cc00f,		/* add	ip, ip, pc			*/
  0xea000000,		/* b	.Lplt_tail			*/
};
/* PR 28924:
   There was a bug due to too high values of THM_MAX_FWD_BRANCH_OFFSET and
   THM2_MAX_FWD_BRANCH_OFFSET.  The first macro concerns the case when Thumb-2
   is not available, and second macro when Thumb-2 is available.  Among other
   things, they affect the range of branches represented as BLX instructions
   in Encoding T2 defined in Section A8.8.25 of the ARM Architecture
   Reference Manual ARMv7-A and ARMv7-R edition issue C.d.  Such branches are
   specified there to have a maximum forward offset that is a multiple of 4.
   Previously, the respective values defined here were multiples of 2 but not
   4 and they are included in comments for reference.  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) - 4 + 4)
/* #def THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) - 2 + 4) */
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 4) + 4)
/* #def THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4) */
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)

/* Kind of a single element within a stub instruction sequence.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,	/* 16-bit Thumb instruction.  */
  THUMB32_TYPE,		/* 32-bit Thumb-2 instruction.  */
  ARM_TYPE,		/* 32-bit ARM instruction.  */
  DATA_TYPE		/* Literal data word.  */
};
/* Constructors for the insn_sequence elements below; each bundles the
   instruction/data word with its type, an optional relocation to apply
   to it, and a relocation addend.  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
#define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template: an instruction or data word, its
   encoding type, the relocation (if any) to apply to it, and the
   relocation addend.  */
typedef struct
{
  bfd_vma data;
  enum stub_insn_type type;
  unsigned int r_type;
  int reloc_addend;
}  insn_sequence;
/* See note [Thumb nop sequence] when adding a veneer.  */

/* Arm/Thumb -> Arm/Thumb long branch stub.  On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
{
  ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* V4T Arm -> Thumb long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
{
  ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub.  Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
{
  THUMB16_INSN (0xb401),	    /* push {r0} */
  THUMB16_INSN (0x4802),	    /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x4684),	    /* mov  ip, r0 */
  THUMB16_INSN (0xbc01),	    /* pop  {r0} */
  THUMB16_INSN (0x4760),	    /* bx   ip */
  THUMB16_INSN (0xbf00),	    /* nop */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd  R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
{
  THUMB32_INSN (0xf85ff000),	    /* ldr.w  pc, [pc, #-0] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd  R_ARM_ABS32(x) */
};

/* Thumb -> Thumb long branch stub.  Used for PureCode sections on Thumb2
   M-profile architectures.  (No data word: the target address is built
   with movw/movt so the stub contains only executable code.)  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
{
  THUMB32_MOVW (0xf2400c00),	    /* mov.w  ip, R_ARM_MOVW_ABS_NC */
  THUMB32_MOVT (0xf2c00c00),	    /* movt   ip, R_ARM_MOVT_ABS << 16 */
  THUMB16_INSN (0x4760),	    /* bx     ip */
};

/* V4T Thumb -> Thumb long branch stub.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0xe7fd),	    /* b   .-2 */
  ARM_INSN (0xe59fc000),	    /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	    /* bx   ip */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0xe7fd),	    /* b   .-2 */
  ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM short branch stub.  Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0xe7fd),	    /* b   .-2 */
  ARM_REL_INSN (0xea000000, -8),    /* b    (X-8) */
};
  2440. /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
  2441. blx to reach the stub if necessary. */
  2442. static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
  2443. {
  2444. ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
  2445. ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
  2446. DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
  2447. };
  2448. /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
  2449. blx to reach the stub if necessary. We can not add into pc;
  2450. it is not guaranteed to mode switch (different in ARMv6 and
  2451. ARMv7). */
  2452. static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
  2453. {
  2454. ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
  2455. ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
  2456. ARM_INSN (0xe12fff1c), /* bx ip */
  2457. DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
  2458. };
  2459. /* V4T ARM -> ARM long branch stub, PIC. */
  2460. static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
  2461. {
  2462. ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
  2463. ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
  2464. ARM_INSN (0xe12fff1c), /* bx ip */
  2465. DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
  2466. };
  2467. /* V4T Thumb -> ARM long branch stub, PIC. */
  2468. static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
  2469. {
  2470. THUMB16_INSN (0x4778), /* bx pc */
  2471. THUMB16_INSN (0xe7fd), /* b .-2 */
  2472. ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
  2473. ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
  2474. DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
  2475. };
/* Thumb -> Thumb long branch stub, PIC.  Used on M-profile
   architectures.  Only Thumb-1 encodings are used; r0 is saved and
   restored around the literal load because low registers are needed
   for the pc-relative ldr.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
{
  THUMB16_INSN (0xb401),	       /* push {r0} */
  THUMB16_INSN (0x4802),	       /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x46fc),	       /* mov  ip, pc */
  THUMB16_INSN (0x4484),	       /* add  ip, r0 */
  THUMB16_INSN (0xbc01),	       /* pop  {r0} */
  THUMB16_INSN (0x4760),	       /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 4),       /* dcd  R_ARM_REL32(X) */
};

/* V4T Thumb -> Thumb long branch stub, PIC.  Using the stack is not
   allowed, so switch to ARM state and build the address in ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
{
  THUMB16_INSN (0x4778),	       /* bx   pc */
  THUMB16_INSN (0xe7fd),	       /* b    .-2 */
  ARM_INSN (0xe59fc004),	       /* ldr  ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	       /* add  ip, pc, ip */
  ARM_INSN (0xe12fff1c),	       /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 0),       /* dcd  R_ARM_REL32(X) */
};
/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
{
  ARM_INSN (0xe59f1000),	       /* ldr   r1, [pc] */
  ARM_INSN (0xe08ff001),	       /* add   pc, pc, r1 */
  DATA_WORD (0, R_ARM_REL32, -4),      /* dcd   R_ARM_REL32(X-4) */
};

/* V4T Thumb -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),	       /* bx   pc */
  THUMB16_INSN (0xe7fd),	       /* b    .-2 */
  ARM_INSN (0xe59f1000),	       /* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),	       /* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),      /* dcd  R_ARM_REL32(X-4) */
};
/* NaCl ARM -> ARM long branch stub.  The "bic" masks the target address
   as required by the NaCl sandbox before the indirect branch; the
   trailing words pad the stub to the sandbox bundle size.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
{
  ARM_INSN (0xe59fc00c),	       /* ldr	ip, [pc, #12] */
  ARM_INSN (0xe3ccc13f),	       /* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),	       /* bx	ip */
  ARM_INSN (0xe320f000),	       /* nop */
  ARM_INSN (0xe125be70),	       /* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_ABS32, 0),       /* dcd	R_ARM_ABS32(X) */
  DATA_WORD (0, R_ARM_NONE, 0),	       /* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),	       /* .word 0 */
};

/* NaCl ARM -> ARM long branch stub, PIC.  As above, but the literal is
   a PC-relative offset added to ip before masking.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
{
  ARM_INSN (0xe59fc00c),	       /* ldr	ip, [pc, #12] */
  ARM_INSN (0xe08cc00f),	       /* add	ip, ip, pc */
  ARM_INSN (0xe3ccc13f),	       /* bic	ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),	       /* bx	ip */
  ARM_INSN (0xe125be70),	       /* bkpt	0x5be0 */
  DATA_WORD (0, R_ARM_REL32, 8),       /* dcd	R_ARM_REL32(X+8) */
  DATA_WORD (0, R_ARM_NONE, 0),	       /* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),	       /* .word 0 */
};
/* Stub used for transition to secure state (aka SG veneer).  */
static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
{
  THUMB32_INSN (0xe97fe97f),	       /* sg.  */
  THUMB32_B_INSN (0xf000b800, -4),     /* b.w original_branch_dest.  */
};

/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */
static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
{
  THUMB16_BCOND_INSN (0xd001),	       /* b<cond>.n true.  */
  THUMB32_B_INSN (0xf000b800, -4),     /* b.w insn_after_original_branch.  */
  THUMB32_B_INSN (0xf000b800, -4)      /* true: b.w original_branch_dest.  */
};

/* Stub used for b.w and bl.w instructions.  */
static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
{
  THUMB32_B_INSN (0xf000b800, -4)      /* b.w original_branch_dest.  */
};

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
{
  THUMB32_B_INSN (0xf000b800, -4)      /* b.w original_branch_dest.  */
};

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */
static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
{
  ARM_REL_INSN (0xea000000, -8)	       /* b original_branch_dest.  */
};
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in arm32_arm_build_stubs() as it triggers:

     // Ignore non-stub sections.
     if (!strstr (stub_sec->name, STUB_SUFFIX))
       continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */
#define STUB_SUFFIX ".__stub"
/* One entry per long/short branch stub defined above.  The same list is
   expanded twice: once (below) to build the stub-type enumeration, and
   once to build the stub_definitions table, keeping the two in sync.  */
#define DEF_STUBS \
  DEF_STUB (long_branch_any_any) \
  DEF_STUB (long_branch_v4t_arm_thumb) \
  DEF_STUB (long_branch_thumb_only) \
  DEF_STUB (long_branch_v4t_thumb_thumb) \
  DEF_STUB (long_branch_v4t_thumb_arm) \
  DEF_STUB (short_branch_v4t_thumb_arm) \
  DEF_STUB (long_branch_any_arm_pic) \
  DEF_STUB (long_branch_any_thumb_pic) \
  DEF_STUB (long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB (long_branch_v4t_arm_thumb_pic) \
  DEF_STUB (long_branch_v4t_thumb_arm_pic) \
  DEF_STUB (long_branch_thumb_only_pic) \
  DEF_STUB (long_branch_any_tls_pic) \
  DEF_STUB (long_branch_v4t_thumb_tls_pic) \
  DEF_STUB (long_branch_arm_nacl) \
  DEF_STUB (long_branch_arm_nacl_pic) \
  DEF_STUB (cmse_branch_thumb_only) \
  DEF_STUB (a8_veneer_b_cond) \
  DEF_STUB (a8_veneer_b) \
  DEF_STUB (a8_veneer_bl) \
  DEF_STUB (a8_veneer_blx) \
  DEF_STUB (long_branch_thumb2_only) \
  DEF_STUB (long_branch_thumb2_only_pure)

/* Enumerate the stub types as arm_stub_<name>; arm_stub_none is 0 so it
   can be used as a "no stub required" sentinel.  */
#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB
/* Note the first a8_veneer type.  Stub types at or above this value are
   Cortex-A8 erratum veneers.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

/* A stub template: the instruction sequence and its entry count.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Table of stub templates, indexed by enum elf32_arm_stub_type.  The
   leading {NULL, 0} entry corresponds to arm_stub_none.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
/* Hash-table entry describing a single generated stub.  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  Each entry records a mapping symbol ($a/$t/$d style) as a
   vma plus a one-character state type.  */
typedef struct elf32_elf_section_map
{
  bfd_vma vma;
  char type;
}
elf32_arm_section_map;

/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;
/* Linked list of VFP11 erratum records.  A record is either a branch to
   a veneer (u.b active) or the veneer itself (u.v active), chosen by
   TYPE; each branch and its veneer point at one another.  */
typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      /* The veneer this branch jumps to.  */
      struct elf32_vfp11_erratum_list *veneer;
      /* The original VFP instruction being worked around.  */
      unsigned int vfp_insn;
    } b;
    struct
    {
      /* The branch record that targets this veneer.  */
      struct elf32_vfp11_erratum_list *branch;
      /* Sequence number used to name the veneer symbol.  */
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;
/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

/* Linked list of STM32L4XX erratum records; same branch/veneer pairing
   scheme as elf32_vfp11_erratum_list above.  */
typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      /* The veneer this branch jumps to.  */
      struct elf32_stm32l4xx_erratum_list *veneer;
      /* The original instruction being worked around.  */
      unsigned int insn;
    } b;
    struct
    {
      /* The branch record that targets this veneer.  */
      struct elf32_stm32l4xx_erratum_list *branch;
      /* Sequence number used to name the veneer symbol.  */
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;
/* Kinds of edit that can be applied to an .ARM.exidx unwind table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  /* Index of the unwind-table entry the edit applies to.  */
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
/* ARM-specific per-section data, extending the generic ELF section
   data.  Retrieved via the elf32_arm_section_data() accessor below.  */
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  unsigned int mapcount;
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  unsigned int additional_reloc_count;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  bfd *input_bfd;
  asection *section;
  bfd_vma offset;
  bfd_vma target_offset;
  /* The branch instruction being patched.  */
  unsigned long orig_insn;
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};

/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  bfd_vma from;
  bfd_vma destination;
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  /* True if a (non-A8) stub already redirects this branch.  */
  bool non_a8_stub;
};
/* The size of the thread control block.  */
#define TCB_SIZE 8

/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  */
  bfd_signed_vma got_offset;
};
/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};

/* Structure to handle FDPIC support for local functions.  */
struct fdpic_local
{
  unsigned int funcdesc_cnt;
  unsigned int gotofffuncdesc_cnt;
  /* Offset of the function descriptor; the low bit is used as a
     "descriptor already initialised" flag (see arm_elf_fill_funcdesc).  */
  int funcdesc_offset;
};
/* ARM-specific per-BFD data, extending the generic ELF tdata.  */
struct elf_arm_obj_tdata
{
  struct elf_obj_tdata root;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;

  /* The number of entries in each of the arrays in this structure.
     Used to avoid buffer overruns.  */
  bfd_size_type num_entries;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Maintains FDPIC counters and funcdesc info.  */
  struct fdpic_local *local_fdpic_cnts;
};

#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

#define elf32_arm_num_entries(bfd) \
  (elf_arm_tdata (bfd)->num_entries)

#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

#define elf32_arm_local_fdpic_cnts(bfd) \
  (elf_arm_tdata (bfd)->local_fdpic_cnts)

#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
/* Allocate the ARM-specific tdata for ABFD.  Returns false on
   allocation failure.  */
static bool
elf32_arm_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
				  ARM_ELF_DATA);
}

#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
/* Structure to handle FDPIC support for extern functions.  */
struct fdpic_global {
  unsigned int gotofffuncdesc_cnt;
  unsigned int gotfuncdesc_cnt;
  unsigned int funcdesc_cnt;
  /* Offsets; the low bit of funcdesc_offset flags an initialised
     descriptor (see arm_elf_fill_funcdesc).  */
  int funcdesc_offset;
  int gotfuncdesc_offset;
};

/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

  /* GOT entry kinds; GD and GDESC may be set simultaneously, hence the
     GOT_TLS_GD_ANY_P helper.  */
#define GOT_UNKNOWN	0
#define GOT_NORMAL	1
#define GOT_TLS_GD	2
#define GOT_TLS_IE	4
#define GOT_TLS_GDESC	8
#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;

  /* Counter for FDPIC relocations against this symbol.  */
  struct fdpic_global fdpic_cnts;
};
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bool (*) (struct elf_link_hash_entry *, void *)) (func),		\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Evaluates to NULL if the hash table is not an ARM one.  */
#define elf32_arm_hash_table(p) \
  ((is_elf_hash_table ((p)->hash)					\
    && elf_hash_table_id (elf_hash_table (p)) == ARM_ELF_DATA)		\
   ? (struct elf32_arm_link_hash_table *) (p)->hash : NULL)

#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target uses REL relocations.  */
  bool use_rel;

  /* Nonzero if import library must be a secure gateway import library
     as per ARMv8-M Security Extensions.  */
  int cmse_implib;

  /* The import library whose symbols' address must remain stable in
     the import library generated.  */
  bfd *in_implib_bfd;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Input stub section holding secure gateway veneers.  */
  asection *cmse_stub_sec;

  /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
     start to be allocated.  */
  bfd_vma new_cmse_stub_offset;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;

  /* True if the target system uses FDPIC.  */
  int fdpic_p;

  /* Fixup section.  Used for FDPIC.  */
  asection *srofixup;
};
  3081. /* Add an FDPIC read-only fixup. */
  3082. static void
  3083. arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
  3084. {
  3085. bfd_vma fixup_offset;
  3086. fixup_offset = srofixup->reloc_count++ * 4;
  3087. BFD_ASSERT (fixup_offset < srofixup->size);
  3088. bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
  3089. }
  3090. static inline int
  3091. ctz (unsigned int mask)
  3092. {
  3093. #if GCC_VERSION >= 3004
  3094. return __builtin_ctz (mask);
  3095. #else
  3096. unsigned int i;
  3097. for (i = 0; i < 8 * sizeof (mask); i++)
  3098. {
  3099. if (mask & 0x1)
  3100. break;
  3101. mask = (mask >> 1);
  3102. }
  3103. return i;
  3104. #endif
  3105. }
  3106. static inline int
  3107. elf32_arm_popcount (unsigned int mask)
  3108. {
  3109. #if GCC_VERSION >= 3004
  3110. return __builtin_popcount (mask);
  3111. #else
  3112. unsigned int i;
  3113. int sum = 0;
  3114. for (i = 0; i < 8 * sizeof (mask); i++)
  3115. {
  3116. if (mask & 0x1)
  3117. sum++;
  3118. mask = (mask >> 1);
  3119. }
  3120. return sum;
  3121. #endif
  3122. }
static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
				    asection *sreloc, Elf_Internal_Rela *rel);

/* Write an FDPIC function descriptor (two GOT words) at OFFSET in the
   .got section, if it has not been written already.  The low bit of
   *FUNCDESC_OFFSET is used as the "already filled" flag and is set on
   completion.  For PIC links a R_ARM_FUNCDESC_VALUE dynamic relocation
   against DYNINDX is emitted and ADDR/SEG are stored; otherwise the
   resolved DYNRELOC_VALUE and the GOT base are stored, each word
   covered by a read-only fixup.  */
static void
arm_elf_fill_funcdesc (bfd *output_bfd,
		       struct bfd_link_info *info,
		       int *funcdesc_offset,
		       int dynindx,
		       int offset,
		       bfd_vma addr,
		       bfd_vma dynreloc_value,
		       bfd_vma seg)
{
  /* Low bit clear means the descriptor has not been initialised yet.  */
  if ((*funcdesc_offset & 1) == 0)
    {
      struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
      asection *sgot = globals->root.sgot;

      if (bfd_link_pic (info))
	{
	  asection *srelgot = globals->root.srelgot;
	  Elf_Internal_Rela outrel;

	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
	  outrel.r_addend = 0;

	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
	}
      else
	{
	  struct elf_link_hash_entry *hgot = globals->root.hgot;
	  bfd_vma got_value = hgot->root.u.def.value
	    + hgot->root.u.def.section->output_section->vma
	    + hgot->root.u.def.section->output_offset;

	  /* Both descriptor words need load-time relocation, so record
	     a rofixup for each.  */
	  arm_elf_add_rofixup (output_bfd, globals->srofixup,
			       sgot->output_section->vma + sgot->output_offset
			       + offset);
	  arm_elf_add_rofixup (output_bfd, globals->srofixup,
			       sgot->output_section->vma + sgot->output_offset
			       + offset + 4);
	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
	}
      /* Mark the descriptor as written.  */
      *funcdesc_offset |= 1;
    }
}
/* Create an entry in an ARM ELF linker hash table.  Standard BFD
   newfunc pattern: allocate (if the subclass has not), chain to the
   superclass constructor, then initialise the ARM-specific fields.  */

static struct bfd_hash_entry *
elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
			     struct bfd_hash_table * table,
			     const char * string)
{
  struct elf32_arm_link_hash_entry * ret =
    (struct elf32_arm_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = (struct elf32_arm_link_hash_entry *)
	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elf32_arm_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      /* Initialise every ARM-specific field; -1 offsets mean
	 "not yet assigned".  */
      ret->tls_type = GOT_UNKNOWN;
      ret->tlsdesc_got = (bfd_vma) -1;
      ret->plt.thumb_refcount = 0;
      ret->plt.maybe_thumb_refcount = 0;
      ret->plt.noncall_refcount = 0;
      ret->plt.got_offset = -1;
      ret->is_iplt = false;
      ret->export_glue = NULL;
      ret->stub_cache = NULL;
      ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
      ret->fdpic_cnts.gotfuncdesc_cnt = 0;
      ret->fdpic_cnts.funcdesc_cnt = 0;
      ret->fdpic_cnts.funcdesc_offset = -1;
      ret->fdpic_cnts.gotfuncdesc_offset = -1;
    }

  return (struct bfd_hash_entry *) ret;
}
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  elf_local_got_refcounts being non-NULL is used as the
   "already allocated" marker for the whole set of arrays.  Returns
   false on allocation failure.  */

static bool
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;

      elf32_arm_num_entries (abfd) = 0;

      /* Whilst it might be tempting to allocate a single block of memory and
	 then divide it up amongst the arrays in the elf_arm_obj_tdata
	 structure, this interferes with the work of memory checkers looking
	 for buffer overruns.  So allocate each array individually.  */

      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;

      elf_local_got_refcounts (abfd) = bfd_zalloc
	(abfd, num_syms * sizeof (* elf_local_got_refcounts (abfd)));

      if (elf_local_got_refcounts (abfd) == NULL)
	return false;

      elf32_arm_local_tlsdesc_gotent (abfd) = bfd_zalloc
	(abfd, num_syms * sizeof (* elf32_arm_local_tlsdesc_gotent (abfd)));

      if (elf32_arm_local_tlsdesc_gotent (abfd) == NULL)
	return false;

      elf32_arm_local_iplt (abfd) = bfd_zalloc
	(abfd, num_syms * sizeof (* elf32_arm_local_iplt (abfd)));

      if (elf32_arm_local_iplt (abfd) == NULL)
	return false;

      elf32_arm_local_fdpic_cnts (abfd) = bfd_zalloc
	(abfd, num_syms * sizeof (* elf32_arm_local_fdpic_cnts (abfd)));

      if (elf32_arm_local_fdpic_cnts (abfd) == NULL)
	return false;

      elf32_arm_local_got_tls_type (abfd) = bfd_zalloc
	(abfd, num_syms * sizeof (* elf32_arm_local_got_tls_type (abfd)));

      if (elf32_arm_local_got_tls_type (abfd) == NULL)
	return false;

      /* Record the count only once every array has been allocated.  */
      elf32_arm_num_entries (abfd) = num_syms;

#if GCC_VERSION >= 3000
      BFD_ASSERT (__alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd))
		  <= __alignof__ (*elf_local_got_refcounts (abfd)));
      BFD_ASSERT (__alignof__ (*elf32_arm_local_iplt (abfd))
		  <= __alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd)));
      BFD_ASSERT (__alignof__ (*elf32_arm_local_fdpic_cnts (abfd))
		  <= __alignof__ (*elf32_arm_local_iplt (abfd)));
      BFD_ASSERT (__alignof__ (*elf32_arm_local_got_tls_type (abfd))
		  <= __alignof__ (*elf32_arm_local_fdpic_cnts (abfd)));
#endif
    }

  return true;
}
  3254. /* Return the .iplt information for local symbol R_SYMNDX, which belongs
  3255. to input bfd ABFD. Create the information if it doesn't already exist.
  3256. Return null if an allocation fails. */
  3257. static struct arm_local_iplt_info *
  3258. elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
  3259. {
  3260. struct arm_local_iplt_info **ptr;
  3261. if (!elf32_arm_allocate_local_sym_info (abfd))
  3262. return NULL;
  3263. BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
  3264. BFD_ASSERT (r_symndx < elf32_arm_num_entries (abfd));
  3265. ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
  3266. if (*ptr == NULL)
  3267. *ptr = bfd_zalloc (abfd, sizeof (**ptr));
  3268. return *ptr;
  3269. }
  3270. /* Try to obtain PLT information for the symbol with index R_SYMNDX
  3271. in ABFD's symbol table. If the symbol is global, H points to its
  3272. hash table entry, otherwise H is null.
  3273. Return true if the symbol does have PLT information. When returning
  3274. true, point *ROOT_PLT at the target-independent reference count/offset
  3275. union and *ARM_PLT at the ARM-specific information. */
  3276. static bool
  3277. elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
  3278. struct elf32_arm_link_hash_entry *h,
  3279. unsigned long r_symndx, union gotplt_union **root_plt,
  3280. struct arm_plt_info **arm_plt)
  3281. {
  3282. struct arm_local_iplt_info *local_iplt;
  3283. if (globals->root.splt == NULL && globals->root.iplt == NULL)
  3284. return false;
  3285. if (h != NULL)
  3286. {
  3287. *root_plt = &h->root.plt;
  3288. *arm_plt = &h->plt;
  3289. return true;
  3290. }
  3291. if (elf32_arm_local_iplt (abfd) == NULL)
  3292. return false;
  3293. if (r_symndx >= elf32_arm_num_entries (abfd))
  3294. return false;
  3295. local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
  3296. if (local_iplt == NULL)
  3297. return false;
  3298. *root_plt = &local_iplt->root;
  3299. *arm_plt = &local_iplt->arm;
  3300. return true;
  3301. }
  3302. static bool using_thumb_only (struct elf32_arm_link_hash_table *globals);
  3303. /* Return true if the PLT described by ARM_PLT requires a Thumb stub
  3304. before it. */
  3305. static bool
  3306. elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
  3307. struct arm_plt_info *arm_plt)
  3308. {
  3309. struct elf32_arm_link_hash_table *htab;
  3310. htab = elf32_arm_hash_table (info);
  3311. return (!using_thumb_only (htab) && (arm_plt->thumb_refcount != 0
  3312. || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
  3313. }
  3314. /* Return a pointer to the head of the dynamic reloc list that should
  3315. be used for local symbol ISYM, which is symbol number R_SYMNDX in
  3316. ABFD's symbol table. Return null if an error occurs. */
  3317. static struct elf_dyn_relocs **
  3318. elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
  3319. Elf_Internal_Sym *isym)
  3320. {
  3321. if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
  3322. {
  3323. struct arm_local_iplt_info *local_iplt;
  3324. local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
  3325. if (local_iplt == NULL)
  3326. return NULL;
  3327. return &local_iplt->dyn_relocs;
  3328. }
  3329. else
  3330. {
  3331. /* Track dynamic relocs needed for local syms too.
  3332. We really need local syms available to do this
  3333. easily. Oh well. */
  3334. asection *s;
  3335. void *vpp;
  3336. s = bfd_section_from_elf_index (abfd, isym->st_shndx);
  3337. if (s == NULL)
  3338. return NULL;
  3339. vpp = &elf_section_data (s)->local_dynrel;
  3340. return (struct elf_dyn_relocs **) vpp;
  3341. }
  3342. }
  3343. /* Initialize an entry in the stub hash table. */
  3344. static struct bfd_hash_entry *
  3345. stub_hash_newfunc (struct bfd_hash_entry *entry,
  3346. struct bfd_hash_table *table,
  3347. const char *string)
  3348. {
  3349. /* Allocate the structure if it has not already been allocated by a
  3350. subclass. */
  3351. if (entry == NULL)
  3352. {
  3353. entry = (struct bfd_hash_entry *)
  3354. bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
  3355. if (entry == NULL)
  3356. return entry;
  3357. }
  3358. /* Call the allocation method of the superclass. */
  3359. entry = bfd_hash_newfunc (entry, table, string);
  3360. if (entry != NULL)
  3361. {
  3362. struct elf32_arm_stub_hash_entry *eh;
  3363. /* Initialize the local fields. */
  3364. eh = (struct elf32_arm_stub_hash_entry *) entry;
  3365. eh->stub_sec = NULL;
  3366. eh->stub_offset = (bfd_vma) -1;
  3367. eh->source_value = 0;
  3368. eh->target_value = 0;
  3369. eh->target_section = NULL;
  3370. eh->orig_insn = 0;
  3371. eh->stub_type = arm_stub_none;
  3372. eh->stub_size = 0;
  3373. eh->stub_template = NULL;
  3374. eh->stub_template_size = -1;
  3375. eh->h = NULL;
  3376. eh->id_sec = NULL;
  3377. eh->output_name = NULL;
  3378. }
  3379. return entry;
  3380. }
  3381. /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
  3382. shortcuts to them in our hash table. */
  3383. static bool
  3384. create_got_section (bfd *dynobj, struct bfd_link_info *info)
  3385. {
  3386. struct elf32_arm_link_hash_table *htab;
  3387. htab = elf32_arm_hash_table (info);
  3388. if (htab == NULL)
  3389. return false;
  3390. if (! _bfd_elf_create_got_section (dynobj, info))
  3391. return false;
  3392. /* Also create .rofixup. */
  3393. if (htab->fdpic_p)
  3394. {
  3395. htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
  3396. (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
  3397. | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
  3398. if (htab->srofixup == NULL
  3399. || !bfd_set_section_alignment (htab->srofixup, 2))
  3400. return false;
  3401. }
  3402. return true;
  3403. }
/* Create the .iplt, .rel(a).iplt and .igot.plt sections.

   These hold the PLT machinery for STT_GNU_IFUNC symbols.  Each
   section is only created if the corresponding htab->root shortcut is
   still NULL, so the function is safe to call more than once.
   Returns false on any section-creation or alignment failure.  */

static bool
create_ifunc_sections (struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;
  const struct elf_backend_data *bed;
  bfd *dynobj;
  asection *s;
  flagword flags;

  htab = elf32_arm_hash_table (info);
  dynobj = htab->root.dynobj;
  bed = get_elf_backend_data (dynobj);
  /* All three sections start from the backend's dynamic-section flags.  */
  flags = bed->dynamic_sec_flags;

  /* .iplt is read-only code, aligned like the regular PLT.  */
  if (htab->root.iplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
					      flags | SEC_READONLY | SEC_CODE);
      if (s == NULL
	  || !bfd_set_section_alignment (s, bed->plt_alignment))
	return false;
      htab->root.iplt = s;
    }

  /* .rel(a).iplt holds the dynamic relocations for .iplt; REL vs RELA
     naming comes from RELOC_SECTION.  */
  if (htab->root.irelplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj,
					      RELOC_SECTION (htab, ".iplt"),
					      flags | SEC_READONLY);
      if (s == NULL
	  || !bfd_set_section_alignment (s, bed->s->log_file_align))
	return false;
      htab->root.irelplt = s;
    }

  /* .igot.plt is the GOT area associated with .iplt entries.  */
  if (htab->root.igotplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (s, bed->s->log_file_align))
	return false;
      htab->root.igotplt = s;
    }

  return true;
}
  3446. /* Determine if we're dealing with a Thumb only architecture. */
  3447. static bool
  3448. using_thumb_only (struct elf32_arm_link_hash_table *globals)
  3449. {
  3450. int arch;
  3451. int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
  3452. Tag_CPU_arch_profile);
  3453. if (profile)
  3454. return profile == 'M';
  3455. arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
  3456. /* Force return logic to be reviewed for each new architecture. */
  3457. BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  3458. if (arch == TAG_CPU_ARCH_V6_M
  3459. || arch == TAG_CPU_ARCH_V6S_M
  3460. || arch == TAG_CPU_ARCH_V7E_M
  3461. || arch == TAG_CPU_ARCH_V8M_BASE
  3462. || arch == TAG_CPU_ARCH_V8M_MAIN
  3463. || arch == TAG_CPU_ARCH_V8_1M_MAIN)
  3464. return true;
  3465. return false;
  3466. }
  3467. /* Determine if we're dealing with a Thumb-2 object. */
  3468. static bool
  3469. using_thumb2 (struct elf32_arm_link_hash_table *globals)
  3470. {
  3471. int arch;
  3472. int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
  3473. Tag_THUMB_ISA_use);
  3474. /* No use of thumb permitted, or a legacy thumb-1/2 definition. */
  3475. if (thumb_isa < 3)
  3476. return thumb_isa == 2;
  3477. /* Variant of thumb is described by the architecture tag. */
  3478. arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
  3479. /* Force return logic to be reviewed for each new architecture. */
  3480. BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  3481. return (arch == TAG_CPU_ARCH_V6T2
  3482. || arch == TAG_CPU_ARCH_V7
  3483. || arch == TAG_CPU_ARCH_V7E_M
  3484. || arch == TAG_CPU_ARCH_V8
  3485. || arch == TAG_CPU_ARCH_V8R
  3486. || arch == TAG_CPU_ARCH_V8M_MAIN
  3487. || arch == TAG_CPU_ARCH_V8_1M_MAIN);
  3488. }
  3489. /* Determine whether Thumb-2 BL instruction is available. */
  3490. static bool
  3491. using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
  3492. {
  3493. int arch =
  3494. bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
  3495. /* Force return logic to be reviewed for each new architecture. */
  3496. BFD_ASSERT (arch <= TAG_CPU_ARCH_V9);
  3497. /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
  3498. return (arch == TAG_CPU_ARCH_V6T2
  3499. || arch >= TAG_CPU_ARCH_V7);
  3500. }
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.

   Also fixes up plt_header_size/plt_entry_size for targets whose PLT
   layout differs from the default chosen at hash-table creation:
   VxWorks, Thumb-only and FDPIC.  */

static bool
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  /* The GOT sections may already exist; only create them if missing.  */
  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return false;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return false;

  if (htab->root.target_os == is_vxworks)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return false;

      /* VxWorks has its own PLT templates; shared links use a
	 headerless PLT.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      /* Temporarily point the attribute lookup at DYNOBJ.  */
      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* FDPIC PLTs are headerless; with BIND_NOW the last five words of
     each entry are dropped.  */
  if (htab->fdpic_p)
    {
      htab->plt_header_size = 0;
      if (info->flags & DF_BIND_NOW)
	htab->plt_entry_size = 4 * (ARRAY_SIZE (elf32_arm_fdpic_plt_entry) - 5);
      else
	htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry);
    }

  /* The generic code above must have created all of these.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->root.sdynbss
      || (!bfd_link_pic (info) && !htab->root.srelbss))
    abort ();

  return true;
}
  3564. /* Copy the extra info we tack onto an elf_link_hash_entry. */
  3565. static void
  3566. elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
  3567. struct elf_link_hash_entry *dir,
  3568. struct elf_link_hash_entry *ind)
  3569. {
  3570. struct elf32_arm_link_hash_entry *edir, *eind;
  3571. edir = (struct elf32_arm_link_hash_entry *) dir;
  3572. eind = (struct elf32_arm_link_hash_entry *) ind;
  3573. if (ind->root.type == bfd_link_hash_indirect)
  3574. {
  3575. /* Copy over PLT info. */
  3576. edir->plt.thumb_refcount += eind->plt.thumb_refcount;
  3577. eind->plt.thumb_refcount = 0;
  3578. edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
  3579. eind->plt.maybe_thumb_refcount = 0;
  3580. edir->plt.noncall_refcount += eind->plt.noncall_refcount;
  3581. eind->plt.noncall_refcount = 0;
  3582. /* Copy FDPIC counters. */
  3583. edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
  3584. edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
  3585. edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;
  3586. /* We should only allocate a function to .iplt once the final
  3587. symbol information is known. */
  3588. BFD_ASSERT (!eind->is_iplt);
  3589. if (dir->got.refcount <= 0)
  3590. {
  3591. edir->tls_type = eind->tls_type;
  3592. eind->tls_type = GOT_UNKNOWN;
  3593. }
  3594. }
  3595. _bfd_elf_link_hash_copy_indirect (info, dir, ind);
  3596. }
  3597. /* Destroy an ARM elf linker hash table. */
  3598. static void
  3599. elf32_arm_link_hash_table_free (bfd *obfd)
  3600. {
  3601. struct elf32_arm_link_hash_table *ret
  3602. = (struct elf32_arm_link_hash_table *) obfd->link.hash;
  3603. bfd_hash_table_free (&ret->stub_hash_table);
  3604. _bfd_elf_link_hash_table_free (obfd);
  3605. }
/* Create an ARM elf linker hash table.

   Allocates the backend hash table, initialises the generic ELF part,
   sets ARM-specific defaults (erratum fixes off, REL relocations,
   default PLT geometry) and creates the long-branch stub hash table.
   Returns NULL on allocation or initialisation failure.  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  size_t amt = sizeof (struct elf32_arm_link_hash_table);

  /* bfd_zmalloc zeroes the structure, so unmentioned fields default
     to 0/NULL.  */
  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  /* Erratum workarounds stay off until explicitly enabled.  */
  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;

  /* Default PLT sizes in bytes; elf32_arm_create_dynamic_sections may
     override these for VxWorks, Thumb-only or FDPIC output.  */
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  ret->use_rel = true;
  ret->obfd = abfd;
  ret->fdpic_p = 0;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      /* The generic init succeeded above, so tear down through the
	 generic free routine, not plain free().  */
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }

  /* Make sure our custom destructor runs when the table is freed.  */
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
  return &ret->root.root;
}
  3644. /* Determine what kind of NOPs are available. */
  3645. static bool
  3646. arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
  3647. {
  3648. const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
  3649. Tag_CPU_arch);
  3650. /* Force return logic to be reviewed for each new architecture. */
  3651. BFD_ASSERT (arch <= TAG_CPU_ARCH_V9);
  3652. return (arch == TAG_CPU_ARCH_V6T2
  3653. || arch == TAG_CPU_ARCH_V6K
  3654. || arch == TAG_CPU_ARCH_V7
  3655. || arch == TAG_CPU_ARCH_V8
  3656. || arch == TAG_CPU_ARCH_V8R
  3657. || arch == TAG_CPU_ARCH_V9);
  3658. }
  3659. static bool
  3660. arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
  3661. {
  3662. switch (stub_type)
  3663. {
  3664. case arm_stub_long_branch_thumb_only:
  3665. case arm_stub_long_branch_thumb2_only:
  3666. case arm_stub_long_branch_thumb2_only_pure:
  3667. case arm_stub_long_branch_v4t_thumb_arm:
  3668. case arm_stub_short_branch_v4t_thumb_arm:
  3669. case arm_stub_long_branch_v4t_thumb_arm_pic:
  3670. case arm_stub_long_branch_v4t_thumb_tls_pic:
  3671. case arm_stub_long_branch_thumb_only_pic:
  3672. case arm_stub_cmse_branch_thumb_only:
  3673. return true;
  3674. case arm_stub_none:
  3675. BFD_FAIL ();
  3676. return false;
  3677. break;
  3678. default:
  3679. return false;
  3680. }
  3681. }
/* Determine the type of stub needed, if any, for a call.

   INPUT_SEC, REL and INPUT_BFD identify the branch relocation being
   resolved.  ST_TYPE is the destination symbol's type; HASH is its
   hash entry (NULL for local symbols); DESTINATION is its resolved
   address, SYM_SEC its section and NAME its printable name for
   diagnostics.  *ACTUAL_BRANCH_TYPE carries the mode (ARM/Thumb) the
   branch currently targets and, when a stub is chosen, is updated to
   the mode the branch will finally use.

   Returns arm_stub_none when the branch is in range and needs no
   veneer, otherwise the required stub type.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bool thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  /* ST_BRANCH_LONG branches never need a veneer.  */
  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, globals, hash,
				 ELF32_R_SYM (rel->r_info), &root_plt,
				 &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      /* The branch will go to a PLT entry, so retarget DESTINATION at
	 the right PLT section (.iplt for ifuncs and local symbols).  */
      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub.  If a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry.
	     Here, we have to check if a pre-PLT Thumb->ARM stub
	     is needed and if it will be close enough.  */

	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;

	  /* Thumb branch/call to PLT: it can become a branch to ARM
	     or to Thumb.  We must perform the same checks and
	     corrections as in elf32_arm_final_link_relocate.  */
	  if ((r_type == R_ARM_THM_CALL)
	      || (r_type == R_ARM_THM_JUMP24))
	    {
	      if (globals->use_blx
		  && r_type == R_ARM_THM_CALL
		  && !thumb_only)
		{
		  /* If the Thumb BLX instruction is available, convert
		     the BL to a BLX instruction to call the ARM-mode
		     PLT entry.  */
		  branch_type = ST_BRANCH_TO_ARM;
		}
	      else
		{
		  if (!thumb_only)
		    /* Target the Thumb stub before the ARM PLT entry.  */
		    destination -= PLT_THUMB_STUB_SIZE;
		  branch_type = ST_BRANCH_TO_THUMB;
		}
	    }
	  else
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	    }
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl).  A stub is needed in this case,
	   but only if this call is not through a PLT entry.  Indeed,
	   PLT stubs handle mode switching already.  */
      if ((!thumb2_bl
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2_bl
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  /* If we need to insert a Thumb-Thumb long branch stub to a
	     PLT, use one that branches directly to the ARM PLT
	     stub.  If we pretended we'd use the pre-PLT Thumb->ARM
	     stub, undo this now.  */
	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	      branch_offset += PLT_THUMB_STUB_SIZE;
	    }

	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  if (input_sec->flags & SEC_ELF_PURECODE)
		    _bfd_error_handler
		      (_("%pB(%pA): warning: long branch veneers used in"
			 " section with SHF_ARM_PURECODE section"
			 " attribute is only supported for M-profile"
			 " targets that implement the movw instruction"),
		       input_bfd, input_sec);

		  /* NOTE: `|' rather than `||' below follows the file's
		     existing convention; both operands are 0/1.  */
		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
		    stub_type = arm_stub_long_branch_thumb2_only_pure;
		  else
		    {
		      if (input_sec->flags & SEC_ELF_PURECODE)
			_bfd_error_handler
			  (_("%pB(%pA): warning: long branch veneers used in"
			     " section with SHF_ARM_PURECODE section"
			     " attribute is only supported for M-profile"
			     " targets that implement the movw instruction"),
			   input_bfd, input_sec);

		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
			/* PIC stub.  */
			? arm_stub_long_branch_thumb_only_pic
			/* non-PIC stub.  */
			: (thumb2 ? arm_stub_long_branch_thumb2_only
				  : arm_stub_long_branch_thumb_only);
		    }
		}
	    }
	  else
	    {
	      if (input_sec->flags & SEC_ELF_PURECODE)
		_bfd_error_handler
		  (_("%pB(%pA): warning: long branch veneers used in"
		     " section with SHF_ARM_PURECODE section"
		     " attribute is only supported" " for M-profile"
		     " targets that implement the movw instruction"),
		   input_bfd, input_sec);

	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  _bfd_error_handler
		    (_("%pB(%s): warning: interworking not enabled;"
		       " first occurrence: %pB: %s call to %s"),
		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (input_sec->flags & SEC_ELF_PURECODE)
	_bfd_error_handler
	  (_("%pB(%pA): warning: long branch veneers used in"
	     " section with SHF_ARM_PURECODE section"
	     " attribute is only supported for M-profile"
	     " targets that implement the movw instruction"),
	   input_bfd, input_sec);

      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */
	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      _bfd_error_handler
		(_("%pB(%s): warning: interworking not enabled;"
		   " first occurrence: %pB: %s call to %s"),
		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)

		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->root.target_os == is_nacl
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))

		/* non-PIC stubs.  */
		: (globals->root.target_os == is_nacl
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
  3997. /* Build a name for an entry in the stub hash table. */
  3998. static char *
  3999. elf32_arm_stub_name (const asection *input_section,
  4000. const asection *sym_sec,
  4001. const struct elf32_arm_link_hash_entry *hash,
  4002. const Elf_Internal_Rela *rel,
  4003. enum elf32_arm_stub_type stub_type)
  4004. {
  4005. char *stub_name;
  4006. bfd_size_type len;
  4007. if (hash)
  4008. {
  4009. len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
  4010. stub_name = (char *) bfd_malloc (len);
  4011. if (stub_name != NULL)
  4012. sprintf (stub_name, "%08x_%s+%x_%d",
  4013. input_section->id & 0xffffffff,
  4014. hash->root.root.root.string,
  4015. (int) rel->r_addend & 0xffffffff,
  4016. (int) stub_type);
  4017. }
  4018. else
  4019. {
  4020. len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
  4021. stub_name = (char *) bfd_malloc (len);
  4022. if (stub_name != NULL)
  4023. sprintf (stub_name, "%08x_%x:%x+%x_%d",
  4024. input_section->id & 0xffffffff,
  4025. sym_sec->id & 0xffffffff,
  4026. ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
  4027. || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
  4028. ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
  4029. (int) rel->r_addend & 0xffffffff,
  4030. (int) stub_type);
  4031. }
  4032. return stub_name;
  4033. }
/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.

   Returns the cached or looked-up stub entry, or NULL when no stub
   applies (non-code section) or the name allocation failed.  Note the
   lookup is non-creating: a missing entry also yields NULL.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_get_stub_entry (const asection *input_section,
			  const asection *sym_sec,
			  struct elf_link_hash_entry *hash,
			  const Elf_Internal_Rela *rel,
			  struct elf32_arm_link_hash_table *htab,
			  enum elf32_arm_stub_type stub_type)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
  const asection *id_sec;

  /* Only code sections can contain branches needing stubs.  */
  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If the input section is the CMSE stubs one and it needs a long
     branch stub to reach it's final destination, give up with an
     error message: this is not supported.  See PR ld/24709.  */
  if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen (CMSE_STUB_NAME)))
    {
      bfd *output_bfd = htab->obfd;
      asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);

      _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
			    "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
			  CMSE_STUB_NAME,
			  (uint64_t)out_sec->output_section->vma
			    + out_sec->output_offset,
			  (uint64_t)sym_sec->output_section->vma
			    + sym_sec->output_offset
			    + h->root.root.u.def.value);
      /* Exit, rather than leave incompletely processed
	 relocations.  */
      xexit (1);
    }

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  BFD_ASSERT (input_section->id <= htab->top_id);
  id_sec = htab->stub_group[input_section->id].link_sec;

  /* Fast path: reuse the stub cached on the hash entry if it was
     created for the same group section and stub type.  */
  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h
      && h->stub_cache->id_sec == id_sec
      && h->stub_cache->stub_type == stub_type)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
      if (stub_name == NULL)
	return NULL;

      /* Non-creating lookup: create=false, copy=false.  */
      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
					 stub_name, false, false);
      if (h != NULL)
	h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}
  4096. /* Whether veneers of type STUB_TYPE require to be in a dedicated output
  4097. section. */
  4098. static bool
  4099. arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
  4100. {
  4101. if (stub_type >= max_stub_type)
  4102. abort (); /* Should be unreachable. */
  4103. switch (stub_type)
  4104. {
  4105. case arm_stub_cmse_branch_thumb_only:
  4106. return true;
  4107. default:
  4108. return false;
  4109. }
  4110. abort (); /* Should be unreachable. */
  4111. }
  4112. /* Required alignment (as a power of 2) for the dedicated section holding
  4113. veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
  4114. with input sections. */
  4115. static int
  4116. arm_dedicated_stub_output_section_required_alignment
  4117. (enum elf32_arm_stub_type stub_type)
  4118. {
  4119. if (stub_type >= max_stub_type)
  4120. abort (); /* Should be unreachable. */
  4121. switch (stub_type)
  4122. {
  4123. /* Vectors of Secure Gateway veneers must be aligned on 32byte
  4124. boundary. */
  4125. case arm_stub_cmse_branch_thumb_only:
  4126. return 5;
  4127. default:
  4128. BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
  4129. return 0;
  4130. }
  4131. abort (); /* Should be unreachable. */
  4132. }
  4133. /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
  4134. NULL if veneers of this type are interspersed with input sections. */
  4135. static const char *
  4136. arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
  4137. {
  4138. if (stub_type >= max_stub_type)
  4139. abort (); /* Should be unreachable. */
  4140. switch (stub_type)
  4141. {
  4142. case arm_stub_cmse_branch_thumb_only:
  4143. return CMSE_STUB_NAME;
  4144. default:
  4145. BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
  4146. return NULL;
  4147. }
  4148. abort (); /* Should be unreachable. */
  4149. }
  4150. /* If veneers of type STUB_TYPE should go in a dedicated output section,
  4151. returns the address of the hash table field in HTAB holding a pointer to the
  4152. corresponding input section. Otherwise, returns NULL. */
  4153. static asection **
  4154. arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
  4155. enum elf32_arm_stub_type stub_type)
  4156. {
  4157. if (stub_type >= max_stub_type)
  4158. abort (); /* Should be unreachable. */
  4159. switch (stub_type)
  4160. {
  4161. case arm_stub_cmse_branch_thumb_only:
  4162. return &htab->cmse_stub_sec;
  4163. default:
  4164. BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
  4165. return NULL;
  4166. }
  4167. abort (); /* Should be unreachable. */
  4168. }
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branch into veneer and can be NULL if stub should go in
   a dedicated output section.  Returns a pointer to the stub section, and the
   section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bool dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* Dedicated case (CMSE gateway veneers): the output section must
	 already exist — it is expected to come from the linker script.  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  _bfd_error_handler (_("no address assigned to the veneers output "
				"section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Normal case: the stub section hangs off the group leader of the
	 section containing the branch.  */
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      /* Fall back to the group leader's stub section slot when this
	 section has none of its own yet.  */
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      align = htab->root.target_os == is_nacl ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      /* Build the stub section name "<prefix><STUB_SUFFIX>" in the stub
	 BFD's objalloc (freed with the BFD, not individually).  */
      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Record the stub section against the branch section's own group slot
     so later lookups find it directly.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
/* Add a new stub entry to the stub hash.  Not all fields of the new
   stub entry are initialised.  Returns NULL on failure (error already
   reported).  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_add_stub (const char *stub_name, asection *section,
		    struct elf32_arm_link_hash_table *htab,
		    enum elf32_arm_stub_type stub_type)
{
  asection *link_sec;
  asection *stub_sec;
  struct elf32_arm_stub_hash_entry *stub_entry;

  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
						stub_type);
  if (stub_sec == NULL)
    return NULL;

  /* Enter this entry into the linker stub hash table.  */
  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
				     true, false);
  if (stub_entry == NULL)
    {
      /* SECTION may be NULL for dedicated-output-section stubs; fall back
	 to the stub section for error reporting.  */
      if (section == NULL)
	section = stub_sec;
      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
			  section->owner, stub_name);
      return NULL;
    }

  stub_entry->stub_sec = stub_sec;
  /* -1 marks "no offset assigned yet"; a slot is assigned later when the
     stub is built (see arm_build_one_stub).  */
  stub_entry->stub_offset = (bfd_vma) -1;
  stub_entry->id_sec = link_sec;

  return stub_entry;
}
  4269. /* Store an Arm insn into an output section not processed by
  4270. elf32_arm_write_section. */
  4271. static void
  4272. put_arm_insn (struct elf32_arm_link_hash_table * htab,
  4273. bfd * output_bfd, bfd_vma val, void * ptr)
  4274. {
  4275. if (htab->byteswap_code != bfd_little_endian (output_bfd))
  4276. bfd_putl32 (val, ptr);
  4277. else
  4278. bfd_putb32 (val, ptr);
  4279. }
  4280. /* Store a 16-bit Thumb insn into an output section not processed by
  4281. elf32_arm_write_section. */
  4282. static void
  4283. put_thumb_insn (struct elf32_arm_link_hash_table * htab,
  4284. bfd * output_bfd, bfd_vma val, void * ptr)
  4285. {
  4286. if (htab->byteswap_code != bfd_little_endian (output_bfd))
  4287. bfd_putl16 (val, ptr);
  4288. else
  4289. bfd_putb16 (val, ptr);
  4290. }
  4291. /* Store a Thumb2 insn into an output section not processed by
  4292. elf32_arm_write_section. */
  4293. static void
  4294. put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
  4295. bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
  4296. {
  4297. /* T2 instructions are 16-bit streamed. */
  4298. if (htab->byteswap_code != bfd_little_endian (output_bfd))
  4299. {
  4300. bfd_putl16 ((val >> 16) & 0xffff, ptr);
  4301. bfd_putl16 ((val & 0xffff), ptr + 2);
  4302. }
  4303. else
  4304. {
  4305. bfd_putb16 ((val >> 16) & 0xffff, ptr);
  4306. bfd_putb16 ((val & 0xffff), ptr + 2);
  4307. }
  4308. }
  4309. /* If it's possible to change R_TYPE to a more efficient access
  4310. model, return the new reloc type. */
  4311. static unsigned
  4312. elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
  4313. struct elf_link_hash_entry *h)
  4314. {
  4315. int is_local = (h == NULL);
  4316. if (bfd_link_dll (info)
  4317. || (h && h->root.type == bfd_link_hash_undefweak))
  4318. return r_type;
  4319. /* We do not support relaxations for Old TLS models. */
  4320. switch (r_type)
  4321. {
  4322. case R_ARM_TLS_GOTDESC:
  4323. case R_ARM_TLS_CALL:
  4324. case R_ARM_THM_TLS_CALL:
  4325. case R_ARM_TLS_DESCSEQ:
  4326. case R_ARM_THM_TLS_DESCSEQ:
  4327. return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
  4328. }
  4329. return r_type;
  4330. }
/* Forward declaration: defined later in the file; used below to apply
   relocations inside the stubs being built.  */
static bfd_reloc_status_type elf32_arm_final_link_relocate
  (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
   Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
   const char *, unsigned char, enum arm_st_branch_type,
   struct elf_link_hash_entry *, bool *, char **);
  4336. static unsigned int
  4337. arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
  4338. {
  4339. switch (stub_type)
  4340. {
  4341. case arm_stub_a8_veneer_b_cond:
  4342. case arm_stub_a8_veneer_b:
  4343. case arm_stub_a8_veneer_bl:
  4344. return 2;
  4345. case arm_stub_long_branch_any_any:
  4346. case arm_stub_long_branch_v4t_arm_thumb:
  4347. case arm_stub_long_branch_thumb_only:
  4348. case arm_stub_long_branch_thumb2_only:
  4349. case arm_stub_long_branch_thumb2_only_pure:
  4350. case arm_stub_long_branch_v4t_thumb_thumb:
  4351. case arm_stub_long_branch_v4t_thumb_arm:
  4352. case arm_stub_short_branch_v4t_thumb_arm:
  4353. case arm_stub_long_branch_any_arm_pic:
  4354. case arm_stub_long_branch_any_thumb_pic:
  4355. case arm_stub_long_branch_v4t_thumb_thumb_pic:
  4356. case arm_stub_long_branch_v4t_arm_thumb_pic:
  4357. case arm_stub_long_branch_v4t_thumb_arm_pic:
  4358. case arm_stub_long_branch_thumb_only_pic:
  4359. case arm_stub_long_branch_any_tls_pic:
  4360. case arm_stub_long_branch_v4t_thumb_tls_pic:
  4361. case arm_stub_cmse_branch_thumb_only:
  4362. case arm_stub_a8_veneer_blx:
  4363. return 4;
  4364. case arm_stub_long_branch_arm_nacl:
  4365. case arm_stub_long_branch_arm_nacl_pic:
  4366. return 16;
  4367. default:
  4368. abort (); /* Should be unreachable. */
  4369. }
  4370. }
  4371. /* Returns whether stubs of type STUB_TYPE take over the symbol they are
  4372. veneering (TRUE) or have their own symbol (FALSE). */
  4373. static bool
  4374. arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
  4375. {
  4376. if (stub_type >= max_stub_type)
  4377. abort (); /* Should be unreachable. */
  4378. switch (stub_type)
  4379. {
  4380. case arm_stub_cmse_branch_thumb_only:
  4381. return true;
  4382. default:
  4383. return false;
  4384. }
  4385. abort (); /* Should be unreachable. */
  4386. }
  4387. /* Returns the padding needed for the dedicated section used stubs of type
  4388. STUB_TYPE. */
  4389. static int
  4390. arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
  4391. {
  4392. if (stub_type >= max_stub_type)
  4393. abort (); /* Should be unreachable. */
  4394. switch (stub_type)
  4395. {
  4396. case arm_stub_cmse_branch_thumb_only:
  4397. return 32;
  4398. default:
  4399. return 0;
  4400. }
  4401. abort (); /* Should be unreachable. */
  4402. }
  4403. /* If veneers of type STUB_TYPE should go in a dedicated output section,
  4404. returns the address of the hash table field in HTAB holding the offset at
  4405. which new veneers should be layed out in the stub section. */
  4406. static bfd_vma*
  4407. arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
  4408. enum elf32_arm_stub_type stub_type)
  4409. {
  4410. switch (stub_type)
  4411. {
  4412. case arm_stub_cmse_branch_thumb_only:
  4413. return &htab->new_cmse_stub_offset;
  4414. default:
  4415. BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
  4416. return NULL;
  4417. }
  4418. }
/* Build one stub: emit the template instructions for GEN_ENTRY into its
   stub section and apply the relocations within the stub.  IN_ARG is the
   bfd_link_info.  Called via bfd_hash_traverse.  Returns FALSE only on
   internal error.  */
static bool
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
#define MAXRELOCS 3
  bool removed_sg_veneer;
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  /* NOTE(review): only the first two of the three elements are set to -1
     explicitly; the third is zero-initialized.  Harmless, as only the
     first nrelocs entries are ever read.  */
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;
  int just_allocated = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  /* Fail if the target section could not be assigned to an output
     section.  The user should fix his linker script.  */
  if (stub_entry->target_section->output_section == NULL
      && info->non_contiguous_regions)
    info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
			      "Retry without --enable-non-contiguous-regions.\n"),
			    stub_entry->target_section);

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return false;

  stub_sec = stub_entry->stub_sec;

  /* Stubs are built in two passes distinguished by required alignment:
     skip this entry if it belongs to the other pass.  */
  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return true;

  /* Assign a slot at the end of section if none assigned yet.  */
  if (stub_entry->stub_offset == (bfd_vma) -1)
    {
      stub_entry->stub_offset = stub_sec->size;
      just_allocated = 1;
    }
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* Emit each template entry, recording which entries need a reloc.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = (bfd_vma) template_sequence[i].data;
	    if (template_sequence[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction.  See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    bfd_put_16 (stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  /* Thumb-2: two halfwords, high one first.  */
	  bfd_put_16 (stub_bfd,
		      (template_sequence[i].data >> 16) & 0xffff,
		      loc + size);
	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
		      loc + size + 2);
	  if (template_sequence[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data,
		      loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template_sequence[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  /* Literal pool entries always get a reloc.  */
	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return false;
	}
    }

  if (just_allocated)
    stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub.  Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume non empty slots have at least one and at most MAXRELOCS entries
     to relocate in each stub.  */
  removed_sg_veneer =
    (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
  BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));

  /* Apply the recorded relocations to the freshly emitted stub.  */
  for (i = 0; i < nrelocs; i++)
    {
      Elf_Internal_Rela rel;
      bool unresolved_reloc;
      char *error_message;
      bfd_vma points_to =
	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;

      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
      rel.r_info = ELF32_R_INFO (0,
				 template_sequence[stub_reloc_idx[i]].r_type);
      rel.r_addend = 0;

      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	   template should refer back to the instruction after the original
	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
	   are only generated when both source and target are in the same
	   section.  */
	points_to = stub_entry->target_section->output_section->vma
		    + stub_entry->target_section->output_offset
		    + stub_entry->source_value;

      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	  (template_sequence[stub_reloc_idx[i]].r_type),
	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	   points_to, info, stub_entry->target_section, "", STT_FUNC,
	   stub_entry->branch_type,
	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	   &error_message);
    }

  return true;
#undef MAXRELOCS
}
  4572. /* Calculate the template, template size and instruction size for a stub.
  4573. Return value is the instruction size. */
  4574. static unsigned int
  4575. find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
  4576. const insn_sequence **stub_template,
  4577. int *stub_template_size)
  4578. {
  4579. const insn_sequence *template_sequence = NULL;
  4580. int template_size = 0, i;
  4581. unsigned int size;
  4582. template_sequence = stub_definitions[stub_type].template_sequence;
  4583. if (stub_template)
  4584. *stub_template = template_sequence;
  4585. template_size = stub_definitions[stub_type].template_size;
  4586. if (stub_template_size)
  4587. *stub_template_size = template_size;
  4588. size = 0;
  4589. for (i = 0; i < template_size; i++)
  4590. {
  4591. switch (template_sequence[i].type)
  4592. {
  4593. case THUMB16_TYPE:
  4594. size += 2;
  4595. break;
  4596. case ARM_TYPE:
  4597. case THUMB32_TYPE:
  4598. case DATA_TYPE:
  4599. size += 4;
  4600. break;
  4601. default:
  4602. BFD_FAIL ();
  4603. return 0;
  4604. }
  4605. }
  4606. return size;
  4607. }
/* As above, but don't actually build the stub.  Just bump offset so
   we know stub section sizes.  Called via bfd_hash_traverse; IN_ARG is
   unused.  Always returns TRUE so traversal continues.  */

static bool
arm_size_one_stub (struct bfd_hash_entry *gen_entry,
		   void *in_arg ATTRIBUTE_UNUSED)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  const insn_sequence *template_sequence;
  int template_size, size;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;

  BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
	      && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));

  size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
				      &template_size);

  /* Initialized to -1.  Null size indicates an empty slot full of zeros.  */
  if (stub_entry->stub_template_size)
    {
      stub_entry->stub_size = size;
      stub_entry->stub_template = template_sequence;
      stub_entry->stub_template_size = template_size;
    }

  /* Already accounted for.  */
  if (stub_entry->stub_offset != (bfd_vma) -1)
    return true;

  /* Reserve space rounded up to 8-byte granularity.  */
  size = (size + 7) & ~7;
  stub_entry->stub_sec->size += size;

  return true;
}
/* External entry points for sizing and building linker stubs.  */

/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.  */

int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  size_t amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* stub_group is indexed by input section id, hence top_id + 1 entries,
     zero-initialized.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections get a NULL marker: elf32_arm_next_input_section
     will chain their input sections there.  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* *list holds bfd_abs_section_ptr for non-code output sections
	 (see elf32_arm_setup_section_lists); only chain code sections.  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
	  /* NOTE: PREV_SEC deliberately stays defined past this function;
	     group_sections below uses it and #undefs it.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bool stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* bfd_abs_section_ptr marks an output section with no code input
	 sections (see elf32_arm_setup_section_lists).  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Grow the group forward while it stays within
	     stub_group_size bytes of the group start.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group: every member points at CURR,
		 the group leader whose stub section they share.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
  4819. /* Comparison function for sorting/searching relocations relating to Cortex-A8
  4820. erratum fix. */
  4821. static int
  4822. a8_reloc_compare (const void *a, const void *b)
  4823. {
  4824. const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
  4825. const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
  4826. if (ra->from < rb->from)
  4827. return -1;
  4828. else if (ra->from > rb->from)
  4829. return 1;
  4830. else
  4831. return 0;
  4832. }
/* Forward declaration: defined later in the file; used below only to
   check whether Thumb glue already exists for a symbol.  */
static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
						    const char *, char **);
  4835. /* Helper function to scan code for sequences which might trigger the Cortex-A8
  4836. branch/TLB erratum. Fill in the table described by A8_FIXES_P,
  4837. NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
  4838. otherwise. */
  4839. static bool
  4840. cortex_a8_erratum_scan (bfd *input_bfd,
  4841. struct bfd_link_info *info,
  4842. struct a8_erratum_fix **a8_fixes_p,
  4843. unsigned int *num_a8_fixes_p,
  4844. unsigned int *a8_fix_table_size_p,
  4845. struct a8_erratum_reloc *a8_relocs,
  4846. unsigned int num_a8_relocs,
  4847. unsigned prev_num_a8_fixes,
  4848. bool *stub_changed_p)
  4849. {
  4850. asection *section;
  4851. struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  4852. struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  4853. unsigned int num_a8_fixes = *num_a8_fixes_p;
  4854. unsigned int a8_fix_table_size = *a8_fix_table_size_p;
  4855. if (htab == NULL)
  4856. return false;
  4857. for (section = input_bfd->sections;
  4858. section != NULL;
  4859. section = section->next)
  4860. {
  4861. bfd_byte *contents = NULL;
  4862. struct _arm_elf_section_data *sec_data;
  4863. unsigned int span;
  4864. bfd_vma base_vma;
  4865. if (elf_section_type (section) != SHT_PROGBITS
  4866. || (elf_section_flags (section) & SHF_EXECINSTR) == 0
  4867. || (section->flags & SEC_EXCLUDE) != 0
  4868. || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
  4869. || (section->output_section == bfd_abs_section_ptr))
  4870. continue;
  4871. base_vma = section->output_section->vma + section->output_offset;
  4872. if (elf_section_data (section)->this_hdr.contents != NULL)
  4873. contents = elf_section_data (section)->this_hdr.contents;
  4874. else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
  4875. return true;
  4876. sec_data = elf32_arm_section_data (section);
  4877. for (span = 0; span < sec_data->mapcount; span++)
  4878. {
  4879. unsigned int span_start = sec_data->map[span].vma;
  4880. unsigned int span_end = (span == sec_data->mapcount - 1)
  4881. ? section->size : sec_data->map[span + 1].vma;
  4882. unsigned int i;
  4883. char span_type = sec_data->map[span].type;
  4884. bool last_was_32bit = false, last_was_branch = false;
  4885. if (span_type != 't')
  4886. continue;
  4887. /* Span is entirely within a single 4KB region: skip scanning. */
  4888. if (((base_vma + span_start) & ~0xfff)
  4889. == ((base_vma + span_end) & ~0xfff))
  4890. continue;
  4891. /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
  4892. * The opcode is BLX.W, BL.W, B.W, Bcc.W
  4893. * The branch target is in the same 4KB region as the
  4894. first half of the branch.
  4895. * The instruction before the branch is a 32-bit
  4896. length non-branch instruction. */
  4897. for (i = span_start; i < span_end;)
  4898. {
  4899. unsigned int insn = bfd_getl16 (&contents[i]);
  4900. bool insn_32bit = false, is_blx = false, is_b = false;
  4901. bool is_bl = false, is_bcc = false, is_32bit_branch;
  4902. if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
  4903. insn_32bit = true;
  4904. if (insn_32bit)
  4905. {
  4906. /* Load the rest of the insn (in manual-friendly order). */
  4907. insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
  4908. /* Encoding T4: B<c>.W. */
  4909. is_b = (insn & 0xf800d000) == 0xf0009000;
  4910. /* Encoding T1: BL<c>.W. */
  4911. is_bl = (insn & 0xf800d000) == 0xf000d000;
  4912. /* Encoding T2: BLX<c>.W. */
  4913. is_blx = (insn & 0xf800d000) == 0xf000c000;
  4914. /* Encoding T3: B<c>.W (not permitted in IT block). */
  4915. is_bcc = (insn & 0xf800d000) == 0xf0008000
  4916. && (insn & 0x07f00000) != 0x03800000;
  4917. }
  4918. is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
  4919. if (((base_vma + i) & 0xfff) == 0xffe
  4920. && insn_32bit
  4921. && is_32bit_branch
  4922. && last_was_32bit
  4923. && ! last_was_branch)
  4924. {
  4925. bfd_signed_vma offset = 0;
  4926. bool force_target_arm = false;
  4927. bool force_target_thumb = false;
  4928. bfd_vma target;
  4929. enum elf32_arm_stub_type stub_type = arm_stub_none;
  4930. struct a8_erratum_reloc key, *found;
  4931. bool use_plt = false;
  4932. key.from = base_vma + i;
  4933. found = (struct a8_erratum_reloc *)
  4934. bsearch (&key, a8_relocs, num_a8_relocs,
  4935. sizeof (struct a8_erratum_reloc),
  4936. &a8_reloc_compare);
  4937. if (found)
  4938. {
  4939. char *error_message = NULL;
  4940. struct elf_link_hash_entry *entry;
  4941. /* We don't care about the error returned from this
  4942. function, only if there is glue or not. */
  4943. entry = find_thumb_glue (info, found->sym_name,
  4944. &error_message);
  4945. if (entry)
  4946. found->non_a8_stub = true;
  4947. /* Keep a simpler condition, for the sake of clarity. */
  4948. if (htab->root.splt != NULL && found->hash != NULL
  4949. && found->hash->root.plt.offset != (bfd_vma) -1)
  4950. use_plt = true;
  4951. if (found->r_type == R_ARM_THM_CALL)
  4952. {
  4953. if (found->branch_type == ST_BRANCH_TO_ARM
  4954. || use_plt)
  4955. force_target_arm = true;
  4956. else
  4957. force_target_thumb = true;
  4958. }
  4959. }
  4960. /* Check if we have an offending branch instruction. */
  4961. if (found && found->non_a8_stub)
  4962. /* We've already made a stub for this instruction, e.g.
  4963. it's a long branch or a Thumb->ARM stub. Assume that
  4964. stub will suffice to work around the A8 erratum (see
  4965. setting of always_after_branch above). */
  4966. ;
  4967. else if (is_bcc)
  4968. {
  4969. offset = (insn & 0x7ff) << 1;
  4970. offset |= (insn & 0x3f0000) >> 4;
  4971. offset |= (insn & 0x2000) ? 0x40000 : 0;
  4972. offset |= (insn & 0x800) ? 0x80000 : 0;
  4973. offset |= (insn & 0x4000000) ? 0x100000 : 0;
  4974. if (offset & 0x100000)
  4975. offset |= ~ ((bfd_signed_vma) 0xfffff);
  4976. stub_type = arm_stub_a8_veneer_b_cond;
  4977. }
  4978. else if (is_b || is_bl || is_blx)
  4979. {
  4980. int s = (insn & 0x4000000) != 0;
  4981. int j1 = (insn & 0x2000) != 0;
  4982. int j2 = (insn & 0x800) != 0;
  4983. int i1 = !(j1 ^ s);
  4984. int i2 = !(j2 ^ s);
  4985. offset = (insn & 0x7ff) << 1;
  4986. offset |= (insn & 0x3ff0000) >> 4;
  4987. offset |= i2 << 22;
  4988. offset |= i1 << 23;
  4989. offset |= s << 24;
  4990. if (offset & 0x1000000)
  4991. offset |= ~ ((bfd_signed_vma) 0xffffff);
  4992. if (is_blx)
  4993. offset &= ~ ((bfd_signed_vma) 3);
  4994. stub_type = is_blx ? arm_stub_a8_veneer_blx :
  4995. is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
  4996. }
  4997. if (stub_type != arm_stub_none)
  4998. {
  4999. bfd_vma pc_for_insn = base_vma + i + 4;
  5000. /* The original instruction is a BL, but the target is
  5001. an ARM instruction. If we were not making a stub,
  5002. the BL would have been converted to a BLX. Use the
  5003. BLX stub instead in that case. */
  5004. if (htab->use_blx && force_target_arm
  5005. && stub_type == arm_stub_a8_veneer_bl)
  5006. {
  5007. stub_type = arm_stub_a8_veneer_blx;
  5008. is_blx = true;
  5009. is_bl = false;
  5010. }
  5011. /* Conversely, if the original instruction was
  5012. BLX but the target is Thumb mode, use the BL
  5013. stub. */
  5014. else if (force_target_thumb
  5015. && stub_type == arm_stub_a8_veneer_blx)
  5016. {
  5017. stub_type = arm_stub_a8_veneer_bl;
  5018. is_blx = false;
  5019. is_bl = true;
  5020. }
  5021. if (is_blx)
  5022. pc_for_insn &= ~ ((bfd_vma) 3);
  5023. /* If we found a relocation, use the proper destination,
  5024. not the offset in the (unrelocated) instruction.
  5025. Note this is always done if we switched the stub type
  5026. above. */
  5027. if (found)
  5028. offset =
  5029. (bfd_signed_vma) (found->destination - pc_for_insn);
  5030. /* If the stub will use a Thumb-mode branch to a
  5031. PLT target, redirect it to the preceding Thumb
  5032. entry point. */
  5033. if (stub_type != arm_stub_a8_veneer_blx && use_plt)
  5034. offset -= PLT_THUMB_STUB_SIZE;
  5035. target = pc_for_insn + offset;
  5036. /* The BLX stub is ARM-mode code. Adjust the offset to
  5037. take the different PC value (+8 instead of +4) into
  5038. account. */
  5039. if (stub_type == arm_stub_a8_veneer_blx)
  5040. offset += 4;
  5041. if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
  5042. {
  5043. char *stub_name = NULL;
  5044. if (num_a8_fixes == a8_fix_table_size)
  5045. {
  5046. a8_fix_table_size *= 2;
  5047. a8_fixes = (struct a8_erratum_fix *)
  5048. bfd_realloc (a8_fixes,
  5049. sizeof (struct a8_erratum_fix)
  5050. * a8_fix_table_size);
  5051. }
  5052. if (num_a8_fixes < prev_num_a8_fixes)
  5053. {
  5054. /* If we're doing a subsequent scan,
  5055. check if we've found the same fix as
  5056. before, and try and reuse the stub
  5057. name. */
  5058. stub_name = a8_fixes[num_a8_fixes].stub_name;
  5059. if ((a8_fixes[num_a8_fixes].section != section)
  5060. || (a8_fixes[num_a8_fixes].offset != i))
  5061. {
  5062. free (stub_name);
  5063. stub_name = NULL;
  5064. *stub_changed_p = true;
  5065. }
  5066. }
  5067. if (!stub_name)
  5068. {
  5069. stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
  5070. if (stub_name != NULL)
  5071. sprintf (stub_name, "%x:%x", section->id, i);
  5072. }
  5073. a8_fixes[num_a8_fixes].input_bfd = input_bfd;
  5074. a8_fixes[num_a8_fixes].section = section;
  5075. a8_fixes[num_a8_fixes].offset = i;
  5076. a8_fixes[num_a8_fixes].target_offset =
  5077. target - base_vma;
  5078. a8_fixes[num_a8_fixes].orig_insn = insn;
  5079. a8_fixes[num_a8_fixes].stub_name = stub_name;
  5080. a8_fixes[num_a8_fixes].stub_type = stub_type;
  5081. a8_fixes[num_a8_fixes].branch_type =
  5082. is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
  5083. num_a8_fixes++;
  5084. }
  5085. }
  5086. }
  5087. i += insn_32bit ? 4 : 2;
  5088. last_was_32bit = insn_32bit;
  5089. last_was_branch = is_32bit_branch;
  5090. }
  5091. }
  5092. if (elf_section_data (section)->this_hdr.contents == NULL)
  5093. free (contents);
  5094. }
  5095. *a8_fixes_p = a8_fixes;
  5096. *num_a8_fixes_p = num_a8_fixes;
  5097. *a8_fix_table_size_p = a8_fix_table_size;
  5098. return false;
  5099. }
  5100. /* Create or update a stub entry depending on whether the stub can already be
  5101. found in HTAB. The stub is identified by:
  5102. - its type STUB_TYPE
  5103. - its source branch (note that several can share the same stub) whose
  5104. section and relocation (if any) are given by SECTION and IRELA
  5105. respectively
  5106. - its target symbol whose input section, hash, name, value and branch type
  5107. are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
  5108. respectively
  5109. If found, the value of the stub's target symbol is updated from SYM_VALUE
  5110. and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
  5111. TRUE and the stub entry is initialized.
  5112. Returns the stub that was created or updated, or NULL if an error
  5113. occurred. */
static struct elf32_arm_stub_hash_entry *
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
		       enum elf32_arm_stub_type stub_type, asection *section,
		       Elf_Internal_Rela *irela, asection *sym_sec,
		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
		       bool *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;
  /* For "claimed" stub types the caller-supplied SYM_NAME doubles as the
     stub name; in that case it is owned by the caller and must never be
     freed here.  Otherwise a name is malloc'd and ownership is local until
     handed to the hash table.  */
  bool sym_claimed = arm_stub_sym_claimed (stub_type);

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = false;

  if (sym_claimed)
    stub_name = sym_name;
  else
    {
      BFD_ASSERT (irela);
      BFD_ASSERT (section);
      BFD_ASSERT (section->id <= htab->top_id);

      /* Support for grouping stub sections.  */
      id_sec = htab->stub_group[section->id].link_sec;

      /* Get the name of this stub.  */
      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
				       stub_type);
      if (!stub_name)
	return NULL;
    }

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, false,
				     false);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      stub_entry->target_value = sym_value;
      return stub_entry;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
  if (stub_entry == NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      return NULL;
    }

  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  if (sym_claimed)
    stub_entry->output_name = sym_name;
  else
    {
      if (sym_name == NULL)
	sym_name = "unnamed";
      /* THUMB2ARM_GLUE_ENTRY_NAME is used for sizing: it is presumably the
	 longest of the three format strings below, so the allocation covers
	 any of them plus the symbol name.  */
      stub_entry->output_name = (char *)
	bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
		   + strlen (sym_name));
      if (stub_entry->output_name == NULL)
	{
	  free (stub_name);
	  return NULL;
	}

      /* For historical reasons, use the existing names for ARM-to-Thumb and
	 Thumb-to-ARM stubs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if ((r_type == (unsigned int) R_ARM_THM_CALL
	   || r_type == (unsigned int) R_ARM_THM_JUMP24
	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
	  && branch_type == ST_BRANCH_TO_ARM)
	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
      else if ((r_type == (unsigned int) R_ARM_CALL
		|| r_type == (unsigned int) R_ARM_JUMP24)
	       && branch_type == ST_BRANCH_TO_THUMB)
	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
      else
	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
    }

  *new_stub = true;
  return stub_entry;
}
/* Scan symbols in INPUT_BFD to identify secure entry functions needing a
   gateway veneer to transition from non secure to secure state and create
   them accordingly.

   "ARMv8-M Security Extensions: Requirements on Development Tools" document
   defines the conditions that govern Secure Gateway veneer creation for a
   given symbol <SYM> as follows:
   - it has function type
   - it has non local binding
   - a symbol named __acle_se_<SYM> (called special symbol) exists with the
     same type, binding and value as <SYM> (called normal symbol).

   An entry function can handle secure state transition itself, in which case
   its special symbol would have a different value from the normal symbol.

   OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
   entry mapping while HTAB gives the name to hash entry mapping.
   *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
   created.

   The return value gives whether a stub failed to be allocated.  */
static bool
cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
	   int *cmse_stub_created)
{
  const struct elf_backend_data *bed;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, j, sym_count, ext_start;
  Elf_Internal_Sym *cmse_sym, *local_syms;
  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
  enum arm_st_branch_type branch_type;
  char *sym_name, *lsym_name;
  bfd_vma sym_value;
  asection *section;
  struct elf32_arm_stub_hash_entry *stub_entry;
  bool is_v8m, new_stub, cmse_invalid, ret = true;

  bed = get_elf_backend_data (input_bfd);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  /* Total number of symbols; indices below EXT_START are local symbols.  */
  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
  ext_start = symtab_hdr->sh_info;
  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	    && out_attr[Tag_CPU_arch_profile].i == 'M');

  /* Use cached symbols if available, otherwise read the local symbols
     from the file (freed at the end of this function in that case).  */
  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
  if (local_syms == NULL)
    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
				       symtab_hdr->sh_info, 0, NULL, NULL,
				       NULL);
  if (symtab_hdr->sh_info && local_syms == NULL)
    return false;

  /* Scan symbols.  */
  for (i = 0; i < sym_count; i++)
    {
      cmse_invalid = false;

      if (i < ext_start)
	{
	  cmse_sym = &local_syms[i];
	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
						      symtab_hdr->sh_link,
						      cmse_sym->st_name);
	  if (!sym_name || !startswith (sym_name, CMSE_PREFIX))
	    continue;

	  /* Special symbol with local binding.  */
	  cmse_invalid = true;
	}
      else
	{
	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
	  sym_name = (char *) cmse_hash->root.root.root.string;
	  if (!startswith (sym_name, CMSE_PREFIX))
	    continue;

	  /* Special symbol has incorrect binding or type.  */
	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
	      || cmse_hash->root.type != STT_FUNC)
	    cmse_invalid = true;
	}

      if (!is_v8m)
	{
	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
				"ARMv8-M architecture or later"),
			      input_bfd, sym_name);
	  is_v8m = true; /* Avoid multiple warning.  */
	  ret = false;
	}

      if (cmse_invalid)
	{
	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
				" a global or weak function symbol"),
			      input_bfd, sym_name);
	  ret = false;
	  /* Local special symbols always take this path, so from here on
	     CMSE_HASH is known to be set (global symbol case).  */
	  if (i < ext_start)
	    continue;
	}

      /* Strip the CMSE prefix to recover the normal symbol's name.  */
      sym_name += strlen (CMSE_PREFIX);
      hash = (struct elf32_arm_link_hash_entry *)
	elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);

      /* No associated normal symbol or it is neither global nor weak.  */
      if (!hash
	  || (hash->root.root.type != bfd_link_hash_defined
	      && hash->root.root.type != bfd_link_hash_defweak)
	  || hash->root.type != STT_FUNC)
	{
	  /* Initialize here to avoid warning about use of possibly
	     uninitialized variable.  */
	  j = 0;

	  if (!hash)
	    {
	      /* Searching for a normal symbol with local binding.  */
	      for (; j < ext_start; j++)
		{
		  lsym_name =
		    bfd_elf_string_from_elf_section (input_bfd,
						     symtab_hdr->sh_link,
						     local_syms[j].st_name);
		  if (!strcmp (sym_name, lsym_name))
		    break;
		}
	    }

	  if (hash || j < ext_start)
	    {
	      _bfd_error_handler
		(_("%pB: invalid standard symbol `%s'; it must be "
		   "a global or weak function symbol"),
		 input_bfd, sym_name);
	    }
	  else
	    _bfd_error_handler
	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
	  ret = false;
	  if (!hash)
	    continue;
	}

      sym_value = hash->root.root.u.def.value;
      section = hash->root.root.u.def.section;

      /* Special and normal symbols must live in the same section.  */
      if (cmse_hash->root.root.u.def.section != section)
	{
	  _bfd_error_handler
	    (_("%pB: `%s' and its special symbol are in different sections"),
	     input_bfd, sym_name);
	  ret = false;
	}
      if (cmse_hash->root.root.u.def.value != sym_value)
	continue; /* Ignore: could be an entry function starting with SG.  */

      /* If this section is a link-once section that will be discarded, then
	 don't create any stubs.  */
      if (section->output_section == NULL)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
	  continue;
	}

      if (hash->root.size == 0)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
	  ret = false;
	}

      /* Don't create stubs once any error has been diagnosed.  */
      if (!ret)
	continue;
      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
      stub_entry
	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
				 NULL, NULL, section, hash, sym_name,
				 sym_value, branch_type, &new_stub);

      if (stub_entry == NULL)
	ret = false;
      else
	{
	  BFD_ASSERT (new_stub);
	  (*cmse_stub_created)++;
	}
    }

  /* LOCAL_SYMS was read from file only if the symbol table was not
     already cached in SYMTAB_HDR->contents.  */
  if (!symtab_hdr->contents)
    free (local_syms);
  return ret;
}
/* Return TRUE iff a symbol identified by its linker HASH entry is a secure
   code entry function, i.e. can be called from non-secure code without using
   a veneer.  */
  5374. static bool
  5375. cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
  5376. {
  5377. bfd_byte contents[4];
  5378. uint32_t first_insn;
  5379. asection *section;
  5380. file_ptr offset;
  5381. bfd *abfd;
  5382. /* Defined symbol of function type. */
  5383. if (hash->root.root.type != bfd_link_hash_defined
  5384. && hash->root.root.type != bfd_link_hash_defweak)
  5385. return false;
  5386. if (hash->root.type != STT_FUNC)
  5387. return false;
  5388. /* Read first instruction. */
  5389. section = hash->root.root.u.def.section;
  5390. abfd = section->owner;
  5391. offset = hash->root.root.u.def.value - section->vma;
  5392. if (!bfd_get_section_contents (abfd, section, contents, offset,
  5393. sizeof (contents)))
  5394. return false;
  5395. first_insn = bfd_get_32 (abfd, contents);
  5396. /* Starts by SG instruction. */
  5397. return first_insn == 0xe97fe97f;
  5398. }
/* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
   secure gateway veneer (i.e. the veneer was not in the input import library)
   and there is no output import library (GEN_INFO->out_implib_bfd is
   NULL).  */
  5402. static bool
  5403. arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
  5404. {
  5405. struct elf32_arm_stub_hash_entry *stub_entry;
  5406. struct bfd_link_info *info;
  5407. /* Massage our args to the form they really have. */
  5408. stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  5409. info = (struct bfd_link_info *) gen_info;
  5410. if (info->out_implib_bfd)
  5411. return true;
  5412. if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
  5413. return true;
  5414. if (stub_entry->stub_offset == (bfd_vma) -1)
  5415. _bfd_error_handler (" %s", stub_entry->output_name);
  5416. return true;
  5417. }
/* Set offset of each secure gateway veneer so that its address remains
   identical to the one in the input import library referred to by
   HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
   (present in input import library but absent from the executable being
   linked) or if new veneers appeared and there is no output import library
   (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
   number of secure gateway veneers found in the input import library).

   The function returns whether an error occurred.  If no error occurred,
   *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
   and this function, and HTAB->new_cmse_stub_offset is set to the offset just
   past the biggest veneer observed, so that new veneers can be laid out after
   it.  */
  5429. static bool
  5430. set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
  5431. struct elf32_arm_link_hash_table *htab,
  5432. int *cmse_stub_created)
  5433. {
  5434. long symsize;
  5435. char *sym_name;
  5436. flagword flags;
  5437. long i, symcount;
  5438. bfd *in_implib_bfd;
  5439. asection *stub_out_sec;
  5440. bool ret = true;
  5441. Elf_Internal_Sym *intsym;
  5442. const char *out_sec_name;
  5443. bfd_size_type cmse_stub_size;
  5444. asymbol **sympp = NULL, *sym;
  5445. struct elf32_arm_link_hash_entry *hash;
  5446. const insn_sequence *cmse_stub_template;
  5447. struct elf32_arm_stub_hash_entry *stub_entry;
  5448. int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
  5449. bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
  5450. bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
  5451. /* No input secure gateway import library. */
  5452. if (!htab->in_implib_bfd)
  5453. return true;
  5454. in_implib_bfd = htab->in_implib_bfd;
  5455. if (!htab->cmse_implib)
  5456. {
  5457. _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
  5458. "Gateway import libraries"), in_implib_bfd);
  5459. return false;
  5460. }
  5461. /* Get symbol table size. */
  5462. symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
  5463. if (symsize < 0)
  5464. return false;
  5465. /* Read in the input secure gateway import library's symbol table. */
  5466. sympp = (asymbol **) bfd_malloc (symsize);
  5467. if (sympp == NULL)
  5468. return false;
  5469. symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
  5470. if (symcount < 0)
  5471. {
  5472. ret = false;
  5473. goto free_sym_buf;
  5474. }
  5475. htab->new_cmse_stub_offset = 0;
  5476. cmse_stub_size =
  5477. find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
  5478. &cmse_stub_template,
  5479. &cmse_stub_template_size);
  5480. out_sec_name =
  5481. arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
  5482. stub_out_sec =
  5483. bfd_get_section_by_name (htab->obfd, out_sec_name);
  5484. if (stub_out_sec != NULL)
  5485. cmse_stub_sec_vma = stub_out_sec->vma;
  5486. /* Set addresses of veneers mentionned in input secure gateway import
  5487. library's symbol table. */
  5488. for (i = 0; i < symcount; i++)
  5489. {
  5490. sym = sympp[i];
  5491. flags = sym->flags;
  5492. sym_name = (char *) bfd_asymbol_name (sym);
  5493. intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
  5494. if (sym->section != bfd_abs_section_ptr
  5495. || !(flags & (BSF_GLOBAL | BSF_WEAK))
  5496. || (flags & BSF_FUNCTION) != BSF_FUNCTION
  5497. || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
  5498. != ST_BRANCH_TO_THUMB))
  5499. {
  5500. _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
  5501. "symbol should be absolute, global and "
  5502. "refer to Thumb functions"),
  5503. in_implib_bfd, sym_name);
  5504. ret = false;
  5505. continue;
  5506. }
  5507. veneer_value = bfd_asymbol_value (sym);
  5508. stub_offset = veneer_value - cmse_stub_sec_vma;
  5509. stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
  5510. false, false);
  5511. hash = (struct elf32_arm_link_hash_entry *)
  5512. elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);
  5513. /* Stub entry should have been created by cmse_scan or the symbol be of
  5514. a secure function callable from non secure code. */
  5515. if (!stub_entry && !hash)
  5516. {
  5517. bool new_stub;
  5518. _bfd_error_handler
  5519. (_("entry function `%s' disappeared from secure code"), sym_name);
  5520. hash = (struct elf32_arm_link_hash_entry *)
  5521. elf_link_hash_lookup (&(htab)->root, sym_name, true, true, true);
  5522. stub_entry
  5523. = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
  5524. NULL, NULL, bfd_abs_section_ptr, hash,
  5525. sym_name, veneer_value,
  5526. ST_BRANCH_TO_THUMB, &new_stub);
  5527. if (stub_entry == NULL)
  5528. ret = false;
  5529. else
  5530. {
  5531. BFD_ASSERT (new_stub);
  5532. new_cmse_stubs_created++;
  5533. (*cmse_stub_created)++;
  5534. }
  5535. stub_entry->stub_template_size = stub_entry->stub_size = 0;
  5536. stub_entry->stub_offset = stub_offset;
  5537. }
  5538. /* Symbol found is not callable from non secure code. */
  5539. else if (!stub_entry)
  5540. {
  5541. if (!cmse_entry_fct_p (hash))
  5542. {
  5543. _bfd_error_handler (_("`%s' refers to a non entry function"),
  5544. sym_name);
  5545. ret = false;
  5546. }
  5547. continue;
  5548. }
  5549. else
  5550. {
  5551. /* Only stubs for SG veneers should have been created. */
  5552. BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
  5553. /* Check visibility hasn't changed. */
  5554. if (!!(flags & BSF_GLOBAL)
  5555. != (hash->root.root.type == bfd_link_hash_defined))
  5556. _bfd_error_handler
  5557. (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
  5558. sym_name);
  5559. stub_entry->stub_offset = stub_offset;
  5560. }
  5561. /* Size should match that of a SG veneer. */
  5562. if (intsym->st_size != cmse_stub_size)
  5563. {
  5564. _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
  5565. in_implib_bfd, sym_name);
  5566. ret = false;
  5567. }
  5568. /* Previous veneer address is before current SG veneer section. */
  5569. if (veneer_value < cmse_stub_sec_vma)
  5570. {
  5571. /* Avoid offset underflow. */
  5572. if (stub_entry)
  5573. stub_entry->stub_offset = 0;
  5574. stub_offset = 0;
  5575. ret = false;
  5576. }
  5577. /* Complain if stub offset not a multiple of stub size. */
  5578. if (stub_offset % cmse_stub_size)
  5579. {
  5580. _bfd_error_handler
  5581. (_("offset of veneer for entry function `%s' not a multiple of "
  5582. "its size"), sym_name);
  5583. ret = false;
  5584. }
  5585. if (!ret)
  5586. continue;
  5587. new_cmse_stubs_created--;
  5588. if (veneer_value < cmse_stub_array_start)
  5589. cmse_stub_array_start = veneer_value;
  5590. next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
  5591. if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
  5592. htab->new_cmse_stub_offset = next_cmse_stub_offset;
  5593. }
  5594. if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
  5595. {
  5596. BFD_ASSERT (new_cmse_stubs_created > 0);
  5597. _bfd_error_handler
  5598. (_("new entry function(s) introduced but no output import library "
  5599. "specified:"));
  5600. bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
  5601. }
  5602. if (cmse_stub_array_start != cmse_stub_sec_vma)
  5603. {
  5604. _bfd_error_handler
  5605. (_("start address of `%s' is different from previous link"),
  5606. out_sec_name);
  5607. ret = false;
  5608. }
  5609. free_sym_buf:
  5610. free (sympp);
  5611. return ret;
  5612. }
  5613. /* Determine and set the size of the stub section for a final link.
  5614. The basic idea here is to examine all the relocations looking for
  5615. PC-relative calls to a target that is unreachable with a "bl"
  5616. instruction. */
  5617. bool
  5618. elf32_arm_size_stubs (bfd *output_bfd,
  5619. bfd *stub_bfd,
  5620. struct bfd_link_info *info,
  5621. bfd_signed_vma group_size,
  5622. asection * (*add_stub_section) (const char *, asection *,
  5623. asection *,
  5624. unsigned int),
  5625. void (*layout_sections_again) (void))
  5626. {
  5627. bool ret = true;
  5628. obj_attribute *out_attr;
  5629. int cmse_stub_created = 0;
  5630. bfd_size_type stub_group_size;
  5631. bool m_profile, stubs_always_after_branch, first_veneer_scan = true;
  5632. struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  5633. struct a8_erratum_fix *a8_fixes = NULL;
  5634. unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
  5635. struct a8_erratum_reloc *a8_relocs = NULL;
  5636. unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
  5637. if (htab == NULL)
  5638. return false;
  5639. if (htab->fix_cortex_a8)
  5640. {
  5641. a8_fixes = (struct a8_erratum_fix *)
  5642. bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
  5643. a8_relocs = (struct a8_erratum_reloc *)
  5644. bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
  5645. }
  5646. /* Propagate mach to stub bfd, because it may not have been
  5647. finalized when we created stub_bfd. */
  5648. bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
  5649. bfd_get_mach (output_bfd));
  5650. /* Stash our params away. */
  5651. htab->stub_bfd = stub_bfd;
  5652. htab->add_stub_section = add_stub_section;
  5653. htab->layout_sections_again = layout_sections_again;
  5654. stubs_always_after_branch = group_size < 0;
  5655. out_attr = elf_known_obj_attributes_proc (output_bfd);
  5656. m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
  5657. /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
  5658. as the first half of a 32-bit branch straddling two 4K pages. This is a
  5659. crude way of enforcing that. */
  5660. if (htab->fix_cortex_a8)
  5661. stubs_always_after_branch = 1;
  5662. if (group_size < 0)
  5663. stub_group_size = -group_size;
  5664. else
  5665. stub_group_size = group_size;
  5666. if (stub_group_size == 1)
  5667. {
  5668. /* Default values. */
  5669. /* Thumb branch range is +-4MB has to be used as the default
  5670. maximum size (a given section can contain both ARM and Thumb
  5671. code, so the worst case has to be taken into account).
  5672. This value is 24K less than that, which allows for 2025
  5673. 12-byte stubs. If we exceed that, then we will fail to link.
  5674. The user will have to relink with an explicit group size
  5675. option. */
  5676. stub_group_size = 4170000;
  5677. }
  5678. group_sections (htab, stub_group_size, stubs_always_after_branch);
  5679. /* If we're applying the cortex A8 fix, we need to determine the
  5680. program header size now, because we cannot change it later --
  5681. that could alter section placements. Notice the A8 erratum fix
  5682. ends up requiring the section addresses to remain unchanged
  5683. modulo the page size. That's something we cannot represent
  5684. inside BFD, and we don't want to force the section alignment to
  5685. be the page size. */
  5686. if (htab->fix_cortex_a8)
  5687. (*htab->layout_sections_again) ();
  5688. while (1)
  5689. {
  5690. bfd *input_bfd;
  5691. unsigned int bfd_indx;
  5692. asection *stub_sec;
  5693. enum elf32_arm_stub_type stub_type;
  5694. bool stub_changed = false;
  5695. unsigned prev_num_a8_fixes = num_a8_fixes;
  5696. num_a8_fixes = 0;
  5697. for (input_bfd = info->input_bfds, bfd_indx = 0;
  5698. input_bfd != NULL;
  5699. input_bfd = input_bfd->link.next, bfd_indx++)
  5700. {
  5701. Elf_Internal_Shdr *symtab_hdr;
  5702. asection *section;
  5703. Elf_Internal_Sym *local_syms = NULL;
  5704. if (!is_arm_elf (input_bfd))
  5705. continue;
  5706. if ((input_bfd->flags & DYNAMIC) != 0
  5707. && (elf_sym_hashes (input_bfd) == NULL
  5708. || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
  5709. continue;
  5710. num_a8_relocs = 0;
  5711. /* We'll need the symbol table in a second. */
  5712. symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  5713. if (symtab_hdr->sh_info == 0)
  5714. continue;
  5715. /* Limit scan of symbols to object file whose profile is
  5716. Microcontroller to not hinder performance in the general case. */
  5717. if (m_profile && first_veneer_scan)
  5718. {
  5719. struct elf_link_hash_entry **sym_hashes;
  5720. sym_hashes = elf_sym_hashes (input_bfd);
  5721. if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
  5722. &cmse_stub_created))
  5723. goto error_ret_free_local;
  5724. if (cmse_stub_created != 0)
  5725. stub_changed = true;
  5726. }
  5727. /* Walk over each section attached to the input bfd. */
  5728. for (section = input_bfd->sections;
  5729. section != NULL;
  5730. section = section->next)
  5731. {
  5732. Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  5733. /* If there aren't any relocs, then there's nothing more
  5734. to do. */
  5735. if ((section->flags & SEC_RELOC) == 0
  5736. || section->reloc_count == 0
  5737. || (section->flags & SEC_CODE) == 0)
  5738. continue;
  5739. /* If this section is a link-once section that will be
  5740. discarded, then don't create any stubs. */
  5741. if (section->output_section == NULL
  5742. || section->output_section->owner != output_bfd)
  5743. continue;
  5744. /* Get the relocs. */
  5745. internal_relocs
  5746. = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
  5747. NULL, info->keep_memory);
  5748. if (internal_relocs == NULL)
  5749. goto error_ret_free_local;
  5750. /* Now examine each relocation. */
  5751. irela = internal_relocs;
  5752. irelaend = irela + section->reloc_count;
  5753. for (; irela < irelaend; irela++)
  5754. {
  5755. unsigned int r_type, r_indx;
  5756. asection *sym_sec;
  5757. bfd_vma sym_value;
  5758. bfd_vma destination;
  5759. struct elf32_arm_link_hash_entry *hash;
  5760. const char *sym_name;
  5761. unsigned char st_type;
  5762. enum arm_st_branch_type branch_type;
  5763. bool created_stub = false;
  5764. r_type = ELF32_R_TYPE (irela->r_info);
  5765. r_indx = ELF32_R_SYM (irela->r_info);
  5766. if (r_type >= (unsigned int) R_ARM_max)
  5767. {
  5768. bfd_set_error (bfd_error_bad_value);
  5769. error_ret_free_internal:
  5770. if (elf_section_data (section)->relocs == NULL)
  5771. free (internal_relocs);
  5772. /* Fall through. */
  5773. error_ret_free_local:
  5774. if (symtab_hdr->contents != (unsigned char *) local_syms)
  5775. free (local_syms);
  5776. return false;
  5777. }
  5778. hash = NULL;
  5779. if (r_indx >= symtab_hdr->sh_info)
  5780. hash = elf32_arm_hash_entry
  5781. (elf_sym_hashes (input_bfd)
  5782. [r_indx - symtab_hdr->sh_info]);
  5783. /* Only look for stubs on branch instructions, or
  5784. non-relaxed TLSCALL */
  5785. if ((r_type != (unsigned int) R_ARM_CALL)
  5786. && (r_type != (unsigned int) R_ARM_THM_CALL)
  5787. && (r_type != (unsigned int) R_ARM_JUMP24)
  5788. && (r_type != (unsigned int) R_ARM_THM_JUMP19)
  5789. && (r_type != (unsigned int) R_ARM_THM_XPC22)
  5790. && (r_type != (unsigned int) R_ARM_THM_JUMP24)
  5791. && (r_type != (unsigned int) R_ARM_PLT32)
  5792. && !((r_type == (unsigned int) R_ARM_TLS_CALL
  5793. || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
  5794. && r_type == (elf32_arm_tls_transition
  5795. (info, r_type,
  5796. (struct elf_link_hash_entry *) hash))
  5797. && ((hash ? hash->tls_type
  5798. : (elf32_arm_local_got_tls_type
  5799. (input_bfd)[r_indx]))
  5800. & GOT_TLS_GDESC) != 0))
  5801. continue;
  5802. /* Now determine the call target, its name, value,
  5803. section. */
  5804. sym_sec = NULL;
  5805. sym_value = 0;
  5806. destination = 0;
  5807. sym_name = NULL;
  5808. if (r_type == (unsigned int) R_ARM_TLS_CALL
  5809. || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
  5810. {
  5811. /* A non-relaxed TLS call. The target is the
  5812. plt-resident trampoline and nothing to do
  5813. with the symbol. */
  5814. BFD_ASSERT (htab->tls_trampoline > 0);
  5815. sym_sec = htab->root.splt;
  5816. sym_value = htab->tls_trampoline;
  5817. hash = 0;
  5818. st_type = STT_FUNC;
  5819. branch_type = ST_BRANCH_TO_ARM;
  5820. }
  5821. else if (!hash)
  5822. {
  5823. /* It's a local symbol. */
  5824. Elf_Internal_Sym *sym;
  5825. if (local_syms == NULL)
  5826. {
  5827. local_syms
  5828. = (Elf_Internal_Sym *) symtab_hdr->contents;
  5829. if (local_syms == NULL)
  5830. local_syms
  5831. = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
  5832. symtab_hdr->sh_info, 0,
  5833. NULL, NULL, NULL);
  5834. if (local_syms == NULL)
  5835. goto error_ret_free_internal;
  5836. }
  5837. sym = local_syms + r_indx;
  5838. if (sym->st_shndx == SHN_UNDEF)
  5839. sym_sec = bfd_und_section_ptr;
  5840. else if (sym->st_shndx == SHN_ABS)
  5841. sym_sec = bfd_abs_section_ptr;
  5842. else if (sym->st_shndx == SHN_COMMON)
  5843. sym_sec = bfd_com_section_ptr;
  5844. else
  5845. sym_sec =
  5846. bfd_section_from_elf_index (input_bfd, sym->st_shndx);
  5847. if (!sym_sec)
  5848. /* This is an undefined symbol. It can never
  5849. be resolved. */
  5850. continue;
  5851. if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
  5852. sym_value = sym->st_value;
  5853. destination = (sym_value + irela->r_addend
  5854. + sym_sec->output_offset
  5855. + sym_sec->output_section->vma);
  5856. st_type = ELF_ST_TYPE (sym->st_info);
  5857. branch_type =
  5858. ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
  5859. sym_name
  5860. = bfd_elf_string_from_elf_section (input_bfd,
  5861. symtab_hdr->sh_link,
  5862. sym->st_name);
  5863. }
  5864. else
  5865. {
  5866. /* It's an external symbol. */
  5867. while (hash->root.root.type == bfd_link_hash_indirect
  5868. || hash->root.root.type == bfd_link_hash_warning)
  5869. hash = ((struct elf32_arm_link_hash_entry *)
  5870. hash->root.root.u.i.link);
  5871. if (hash->root.root.type == bfd_link_hash_defined
  5872. || hash->root.root.type == bfd_link_hash_defweak)
  5873. {
  5874. sym_sec = hash->root.root.u.def.section;
  5875. sym_value = hash->root.root.u.def.value;
  5876. struct elf32_arm_link_hash_table *globals =
  5877. elf32_arm_hash_table (info);
  5878. /* For a destination in a shared library,
  5879. use the PLT stub as target address to
  5880. decide whether a branch stub is
  5881. needed. */
  5882. if (globals != NULL
  5883. && globals->root.splt != NULL
  5884. && hash != NULL
  5885. && hash->root.plt.offset != (bfd_vma) -1)
  5886. {
  5887. sym_sec = globals->root.splt;
  5888. sym_value = hash->root.plt.offset;
  5889. if (sym_sec->output_section != NULL)
  5890. destination = (sym_value
  5891. + sym_sec->output_offset
  5892. + sym_sec->output_section->vma);
  5893. }
  5894. else if (sym_sec->output_section != NULL)
  5895. destination = (sym_value + irela->r_addend
  5896. + sym_sec->output_offset
  5897. + sym_sec->output_section->vma);
  5898. }
  5899. else if ((hash->root.root.type == bfd_link_hash_undefined)
  5900. || (hash->root.root.type == bfd_link_hash_undefweak))
  5901. {
  5902. /* For a shared library, use the PLT stub as
  5903. target address to decide whether a long
  5904. branch stub is needed.
  5905. For absolute code, they cannot be handled. */
  5906. struct elf32_arm_link_hash_table *globals =
  5907. elf32_arm_hash_table (info);
  5908. if (globals != NULL
  5909. && globals->root.splt != NULL
  5910. && hash != NULL
  5911. && hash->root.plt.offset != (bfd_vma) -1)
  5912. {
  5913. sym_sec = globals->root.splt;
  5914. sym_value = hash->root.plt.offset;
  5915. if (sym_sec->output_section != NULL)
  5916. destination = (sym_value
  5917. + sym_sec->output_offset
  5918. + sym_sec->output_section->vma);
  5919. }
  5920. else
  5921. continue;
  5922. }
  5923. else
  5924. {
  5925. bfd_set_error (bfd_error_bad_value);
  5926. goto error_ret_free_internal;
  5927. }
  5928. st_type = hash->root.type;
  5929. branch_type =
  5930. ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
  5931. sym_name = hash->root.root.root.string;
  5932. }
  5933. do
  5934. {
  5935. bool new_stub;
  5936. struct elf32_arm_stub_hash_entry *stub_entry;
  5937. /* Determine what (if any) linker stub is needed. */
  5938. stub_type = arm_type_of_stub (info, section, irela,
  5939. st_type, &branch_type,
  5940. hash, destination, sym_sec,
  5941. input_bfd, sym_name);
  5942. if (stub_type == arm_stub_none)
  5943. break;
  5944. /* We've either created a stub for this reloc already,
  5945. or we are about to. */
  5946. stub_entry =
  5947. elf32_arm_create_stub (htab, stub_type, section, irela,
  5948. sym_sec, hash,
  5949. (char *) sym_name, sym_value,
  5950. branch_type, &new_stub);
  5951. created_stub = stub_entry != NULL;
  5952. if (!created_stub)
  5953. goto error_ret_free_internal;
  5954. else if (!new_stub)
  5955. break;
  5956. else
  5957. stub_changed = true;
  5958. }
  5959. while (0);
  5960. /* Look for relocations which might trigger Cortex-A8
  5961. erratum. */
  5962. if (htab->fix_cortex_a8
  5963. && (r_type == (unsigned int) R_ARM_THM_JUMP24
  5964. || r_type == (unsigned int) R_ARM_THM_JUMP19
  5965. || r_type == (unsigned int) R_ARM_THM_CALL
  5966. || r_type == (unsigned int) R_ARM_THM_XPC22))
  5967. {
  5968. bfd_vma from = section->output_section->vma
  5969. + section->output_offset
  5970. + irela->r_offset;
  5971. if ((from & 0xfff) == 0xffe)
  5972. {
  5973. /* Found a candidate. Note we haven't checked the
  5974. destination is within 4K here: if we do so (and
  5975. don't create an entry in a8_relocs) we can't tell
  5976. that a branch should have been relocated when
  5977. scanning later. */
  5978. if (num_a8_relocs == a8_reloc_table_size)
  5979. {
  5980. a8_reloc_table_size *= 2;
  5981. a8_relocs = (struct a8_erratum_reloc *)
  5982. bfd_realloc (a8_relocs,
  5983. sizeof (struct a8_erratum_reloc)
  5984. * a8_reloc_table_size);
  5985. }
  5986. a8_relocs[num_a8_relocs].from = from;
  5987. a8_relocs[num_a8_relocs].destination = destination;
  5988. a8_relocs[num_a8_relocs].r_type = r_type;
  5989. a8_relocs[num_a8_relocs].branch_type = branch_type;
  5990. a8_relocs[num_a8_relocs].sym_name = sym_name;
  5991. a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
  5992. a8_relocs[num_a8_relocs].hash = hash;
  5993. num_a8_relocs++;
  5994. }
  5995. }
  5996. }
  5997. /* We're done with the internal relocs, free them. */
  5998. if (elf_section_data (section)->relocs == NULL)
  5999. free (internal_relocs);
  6000. }
  6001. if (htab->fix_cortex_a8)
  6002. {
  6003. /* Sort relocs which might apply to Cortex-A8 erratum. */
  6004. qsort (a8_relocs, num_a8_relocs,
  6005. sizeof (struct a8_erratum_reloc),
  6006. &a8_reloc_compare);
  6007. /* Scan for branches which might trigger Cortex-A8 erratum. */
  6008. if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
  6009. &num_a8_fixes, &a8_fix_table_size,
  6010. a8_relocs, num_a8_relocs,
  6011. prev_num_a8_fixes, &stub_changed)
  6012. != 0)
  6013. goto error_ret_free_local;
  6014. }
  6015. if (local_syms != NULL
  6016. && symtab_hdr->contents != (unsigned char *) local_syms)
  6017. {
  6018. if (!info->keep_memory)
  6019. free (local_syms);
  6020. else
  6021. symtab_hdr->contents = (unsigned char *) local_syms;
  6022. }
  6023. }
  6024. if (first_veneer_scan
  6025. && !set_cmse_veneer_addr_from_implib (info, htab,
  6026. &cmse_stub_created))
  6027. ret = false;
  6028. if (prev_num_a8_fixes != num_a8_fixes)
  6029. stub_changed = true;
  6030. if (!stub_changed)
  6031. break;
  6032. /* OK, we've added some stubs. Find out the new size of the
  6033. stub sections. */
  6034. for (stub_sec = htab->stub_bfd->sections;
  6035. stub_sec != NULL;
  6036. stub_sec = stub_sec->next)
  6037. {
  6038. /* Ignore non-stub sections. */
  6039. if (!strstr (stub_sec->name, STUB_SUFFIX))
  6040. continue;
  6041. stub_sec->size = 0;
  6042. }
  6043. /* Add new SG veneers after those already in the input import
  6044. library. */
  6045. for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
  6046. stub_type++)
  6047. {
  6048. bfd_vma *start_offset_p;
  6049. asection **stub_sec_p;
  6050. start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
  6051. stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
  6052. if (start_offset_p == NULL)
  6053. continue;
  6054. BFD_ASSERT (stub_sec_p != NULL);
  6055. if (*stub_sec_p != NULL)
  6056. (*stub_sec_p)->size = *start_offset_p;
  6057. }
  6058. /* Compute stub section size, considering padding. */
  6059. bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
  6060. for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
  6061. stub_type++)
  6062. {
  6063. int size, padding;
  6064. asection **stub_sec_p;
  6065. padding = arm_dedicated_stub_section_padding (stub_type);
  6066. stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
  6067. /* Skip if no stub input section or no stub section padding
  6068. required. */
  6069. if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
  6070. continue;
  6071. /* Stub section padding required but no dedicated section. */
  6072. BFD_ASSERT (stub_sec_p);
  6073. size = (*stub_sec_p)->size;
  6074. size = (size + padding - 1) & ~(padding - 1);
  6075. (*stub_sec_p)->size = size;
  6076. }
  6077. /* Add Cortex-A8 erratum veneers to stub section sizes too. */
  6078. if (htab->fix_cortex_a8)
  6079. for (i = 0; i < num_a8_fixes; i++)
  6080. {
  6081. stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
  6082. a8_fixes[i].section, htab, a8_fixes[i].stub_type);
  6083. if (stub_sec == NULL)
  6084. return false;
  6085. stub_sec->size
  6086. += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
  6087. NULL);
  6088. }
  6089. /* Ask the linker to do its stuff. */
  6090. (*htab->layout_sections_again) ();
  6091. first_veneer_scan = false;
  6092. }
  6093. /* Add stubs for Cortex-A8 erratum fixes now. */
  6094. if (htab->fix_cortex_a8)
  6095. {
  6096. for (i = 0; i < num_a8_fixes; i++)
  6097. {
  6098. struct elf32_arm_stub_hash_entry *stub_entry;
  6099. char *stub_name = a8_fixes[i].stub_name;
  6100. asection *section = a8_fixes[i].section;
  6101. unsigned int section_id = a8_fixes[i].section->id;
  6102. asection *link_sec = htab->stub_group[section_id].link_sec;
  6103. asection *stub_sec = htab->stub_group[section_id].stub_sec;
  6104. const insn_sequence *template_sequence;
  6105. int template_size, size = 0;
  6106. stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
  6107. true, false);
  6108. if (stub_entry == NULL)
  6109. {
  6110. _bfd_error_handler (_("%pB: cannot create stub entry %s"),
  6111. section->owner, stub_name);
  6112. return false;
  6113. }
  6114. stub_entry->stub_sec = stub_sec;
  6115. stub_entry->stub_offset = (bfd_vma) -1;
  6116. stub_entry->id_sec = link_sec;
  6117. stub_entry->stub_type = a8_fixes[i].stub_type;
  6118. stub_entry->source_value = a8_fixes[i].offset;
  6119. stub_entry->target_section = a8_fixes[i].section;
  6120. stub_entry->target_value = a8_fixes[i].target_offset;
  6121. stub_entry->orig_insn = a8_fixes[i].orig_insn;
  6122. stub_entry->branch_type = a8_fixes[i].branch_type;
  6123. size = find_stub_size_and_template (a8_fixes[i].stub_type,
  6124. &template_sequence,
  6125. &template_size);
  6126. stub_entry->stub_size = size;
  6127. stub_entry->stub_template = template_sequence;
  6128. stub_entry->stub_template_size = template_size;
  6129. }
  6130. /* Stash the Cortex-A8 erratum fix array for use later in
  6131. elf32_arm_write_section(). */
  6132. htab->a8_erratum_fixes = a8_fixes;
  6133. htab->num_a8_erratum_fixes = num_a8_fixes;
  6134. }
  6135. else
  6136. {
  6137. htab->a8_erratum_fixes = NULL;
  6138. htab->num_a8_erratum_fixes = 0;
  6139. }
  6140. return ret;
  6141. }
/* Build all the stubs associated with the current output file.  The
   stubs are kept in a hash table attached to the main linker hash
   table.  We also set up the .plt entries for statically linked PIC
   functions here.  This function is called via arm_elf_finish in the
   linker.  */

bool
elf32_arm_build_stubs (struct bfd_link_info *info)
{
  asection *stub_sec;
  struct bfd_hash_table *table;
  enum elf32_arm_stub_type stub_type;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  for (stub_sec = htab->stub_bfd->sections;
       stub_sec != NULL;
       stub_sec = stub_sec->next)
    {
      bfd_size_type size;

      /* Ignore non-stub sections.  */
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

      /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
	 must at least be done for stub section requiring padding and for SG
	 veneers to ensure that a non secure code branching to a removed SG
	 veneer causes an error.  */
      size = stub_sec->size;
      stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
      if (stub_sec->contents == NULL && size != 0)
	return false;

      /* Reset the size; it is reused below (and by the stub-building
	 traversals) as the running "next free offset" in the section.  */
      stub_sec->size = 0;
    }

  /* Add new SG veneers after those already in the input import library:
     for each stub type with a dedicated input section, start sizing at
     the recorded offset past the pre-existing veneers.  */
  for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
    {
      bfd_vma *start_offset_p;
      asection **stub_sec_p;

      start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      if (start_offset_p == NULL)
	continue;

      BFD_ASSERT (stub_sec_p != NULL);
      if (*stub_sec_p != NULL)
	(*stub_sec_p)->size = *start_offset_p;
    }

  /* Build the stubs as directed by the stub hash table.  */
  table = &htab->stub_hash_table;
  bfd_hash_traverse (table, arm_build_one_stub, info);
  if (htab->fix_cortex_a8)
    {
      /* Place the cortex a8 stubs last.  Setting fix_cortex_a8 to -1
	 signals the second traversal to emit only the A8 stubs.  */
      htab->fix_cortex_a8 = -1;
      bfd_hash_traverse (table, arm_build_one_stub, info);
    }

  return true;
}
  6199. /* Locate the Thumb encoded calling stub for NAME. */
  6200. static struct elf_link_hash_entry *
  6201. find_thumb_glue (struct bfd_link_info *link_info,
  6202. const char *name,
  6203. char **error_message)
  6204. {
  6205. char *tmp_name;
  6206. struct elf_link_hash_entry *hash;
  6207. struct elf32_arm_link_hash_table *hash_table;
  6208. /* We need a pointer to the armelf specific hash table. */
  6209. hash_table = elf32_arm_hash_table (link_info);
  6210. if (hash_table == NULL)
  6211. return NULL;
  6212. tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
  6213. + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
  6214. BFD_ASSERT (tmp_name);
  6215. sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
  6216. hash = elf_link_hash_lookup
  6217. (&(hash_table)->root, tmp_name, false, false, true);
  6218. if (hash == NULL
  6219. && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
  6220. "Thumb", tmp_name, name) == -1)
  6221. *error_message = (char *) bfd_errmsg (bfd_error_system_call);
  6222. free (tmp_name);
  6223. return hash;
  6224. }
  6225. /* Locate the ARM encoded calling stub for NAME. */
  6226. static struct elf_link_hash_entry *
  6227. find_arm_glue (struct bfd_link_info *link_info,
  6228. const char *name,
  6229. char **error_message)
  6230. {
  6231. char *tmp_name;
  6232. struct elf_link_hash_entry *myh;
  6233. struct elf32_arm_link_hash_table *hash_table;
  6234. /* We need a pointer to the elfarm specific hash table. */
  6235. hash_table = elf32_arm_hash_table (link_info);
  6236. if (hash_table == NULL)
  6237. return NULL;
  6238. tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
  6239. + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
  6240. BFD_ASSERT (tmp_name);
  6241. sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
  6242. myh = elf_link_hash_lookup
  6243. (&(hash_table)->root, tmp_name, false, false, true);
  6244. if (myh == NULL
  6245. && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
  6246. "ARM", tmp_name, name) == -1)
  6247. *error_message = (char *) bfd_errmsg (bfd_error_system_call);
  6248. free (tmp_name);
  6249. return myh;
  6250. }
/* ARM->Thumb glue (static images):

   .arm
   __func_from_arm:
   ldr r12, __func_addr
   bx  r12
   __func_addr:
   .word func	@ behave as if you saw a ARM_32 reloc.

   (v5t static images)
   .arm
   __func_from_arm:
   ldr pc, __func_addr
   __func_addr:
   .word func	@ behave as if you saw a ARM_32 reloc.

   (relocatable images)
   .arm
   __func_from_arm:
   ldr r12, __func_offset
   add r12, r12, pc
   bx  r12
   __func_offset:
   .word func - .  */

#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;		/* ldr r12, [pc] */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx r12 */
/* Placeholder word for the target address (patched when the glue is
   emitted — presumably; confirm at the emission site).  */
static const insn32 a2t3_func_addr_insn = 0x00000001;

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, [pc, #-4] */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;	/* target address slot */

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr r12, [pc, #4] */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add r12, r12, pc */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx r12 */

/* Thumb->ARM:				Thumb->(non-interworking aware) ARM

     .thumb				.thumb
     .align 2				.align 2
   __func_from_thumb:		      __func_from_thumb:
     bx pc				push {r6, lr}
     nop				ldr  r6, __func_addr
     .arm				mov  lr, pc
     b func				bx   r6
					.arm
				     ;; back_to_thumb
					ldmia r13! {r6, lr}
					bx    lr
				      __func_addr:
					.word func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;	/* bx pc */
static const insn16 t2a2_noop_insn = 0x46c0;	/* nop */
static const insn32 t2a3_b_insn = 0xea000000;	/* b <target> (offset patched) */

/* Per-veneer sizes in bytes for the erratum workarounds.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* ARMv4 BX veneer: test bit 0 of the target, return via mov when it is
   an ARM address, otherwise bx (register encoded at emission time).  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst r<n>, #1 */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc, r<n> */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx r<n> */
  6308. #ifndef ELFARM_NABI_C_INCLUDED
  6309. static void
  6310. arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
  6311. {
  6312. asection * s;
  6313. bfd_byte * contents;
  6314. if (size == 0)
  6315. {
  6316. /* Do not include empty glue sections in the output. */
  6317. if (abfd != NULL)
  6318. {
  6319. s = bfd_get_linker_section (abfd, name);
  6320. if (s != NULL)
  6321. s->flags |= SEC_EXCLUDE;
  6322. }
  6323. return;
  6324. }
  6325. BFD_ASSERT (abfd != NULL);
  6326. s = bfd_get_linker_section (abfd, name);
  6327. BFD_ASSERT (s != NULL);
  6328. contents = (bfd_byte *) bfd_zalloc (abfd, size);
  6329. BFD_ASSERT (s->size == size);
  6330. s->contents = contents;
  6331. }
  6332. bool
  6333. bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
  6334. {
  6335. struct elf32_arm_link_hash_table * globals;
  6336. globals = elf32_arm_hash_table (info);
  6337. BFD_ASSERT (globals != NULL);
  6338. arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
  6339. globals->arm_glue_size,
  6340. ARM2THUMB_GLUE_SECTION_NAME);
  6341. arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
  6342. globals->thumb_glue_size,
  6343. THUMB2ARM_GLUE_SECTION_NAME);
  6344. arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
  6345. globals->vfp11_erratum_glue_size,
  6346. VFP11_ERRATUM_VENEER_SECTION_NAME);
  6347. arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
  6348. globals->stm32l4xx_erratum_glue_size,
  6349. STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
  6350. arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
  6351. globals->bx_glue_size,
  6352. ARM_BX_GLUE_SECTION_NAME);
  6353. return true;
  6354. }
/* Allocate space and symbols for calling a Thumb function from Arm mode.
   returns the symbol identifying the stub.  H is the hash entry of the
   Thumb-mode target function.  The glue symbol is created local and
   idempotently: a second call for the same target returns the existing
   entry without growing the glue section.  */

static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Mangled glue symbol name derived from the target's name.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, false, false, true);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, true, false, &bh);
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Choose the glue flavour: PIC glue for position-independent output,
     the short v5 sequence when BLX is available, otherwise the full
     static sequence.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Reserve room for this veneer in the glue section.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
/* Allocate space for ARMv4 BX veneers.  REG is the register operand of
   the BX instruction needing the veneer; one veneer is created per
   register, at most once.  */

static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated (a nonzero
     bx_glue_offset entry means allocated; see the "| 2" below).  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  */
  tmp_name = (char *)
    bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, false, false, false);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, true, false, &bh);
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  s->size += ARM_BX_VENEER_SIZE;

  /* Store the offset with bit 1 set so the entry is nonzero even when
     the veneer sits at offset 0 — the early-return above relies on a
     nonzero value meaning "already allocated".  */
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}
  6454. /* Add an entry to the code/data map for section SEC. */
  6455. static void
  6456. elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
  6457. {
  6458. struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
  6459. unsigned int newidx;
  6460. if (sec_data->map == NULL)
  6461. {
  6462. sec_data->map = (elf32_arm_section_map *)
  6463. bfd_malloc (sizeof (elf32_arm_section_map));
  6464. sec_data->mapcount = 0;
  6465. sec_data->mapsize = 1;
  6466. }
  6467. newidx = sec_data->mapcount++;
  6468. if (sec_data->mapcount > sec_data->mapsize)
  6469. {
  6470. sec_data->mapsize *= 2;
  6471. sec_data->map = (elf32_arm_section_map *)
  6472. bfd_realloc_or_free (sec_data->map, sec_data->mapsize
  6473. * sizeof (elf32_arm_section_map));
  6474. }
  6475. if (sec_data->map)
  6476. {
  6477. sec_data->map[newidx].vma = vma;
  6478. sec_data->map[newidx].type = type;
  6479. }
  6480. }
  6481. /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
  6482. veneers are handled for now. */
  6483. static bfd_vma
  6484. record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
  6485. elf32_vfp11_erratum_list *branch,
  6486. bfd *branch_bfd,
  6487. asection *branch_sec,
  6488. unsigned int offset)
  6489. {
  6490. asection *s;
  6491. struct elf32_arm_link_hash_table *hash_table;
  6492. char *tmp_name;
  6493. struct elf_link_hash_entry *myh;
  6494. struct bfd_link_hash_entry *bh;
  6495. bfd_vma val;
  6496. struct _arm_elf_section_data *sec_data;
  6497. elf32_vfp11_erratum_list *newerr;
  6498. hash_table = elf32_arm_hash_table (link_info);
  6499. BFD_ASSERT (hash_table != NULL);
  6500. BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
  6501. s = bfd_get_linker_section
  6502. (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
  6503. sec_data = elf32_arm_section_data (s);
  6504. BFD_ASSERT (s != NULL);
  6505. tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
  6506. (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
  6507. BFD_ASSERT (tmp_name);
  6508. sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
  6509. hash_table->num_vfp11_fixes);
  6510. myh = elf_link_hash_lookup
  6511. (&(hash_table)->root, tmp_name, false, false, false);
  6512. BFD_ASSERT (myh == NULL);
  6513. bh = NULL;
  6514. val = hash_table->vfp11_erratum_glue_size;
  6515. _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
  6516. tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
  6517. NULL, true, false, &bh);
  6518. myh = (struct elf_link_hash_entry *) bh;
  6519. myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  6520. myh->forced_local = 1;
  6521. /* Link veneer back to calling location. */
  6522. sec_data->erratumcount += 1;
  6523. newerr = (elf32_vfp11_erratum_list *)
  6524. bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
  6525. newerr->type = VFP11_ERRATUM_ARM_VENEER;
  6526. newerr->vma = -1;
  6527. newerr->u.v.branch = branch;
  6528. newerr->u.v.id = hash_table->num_vfp11_fixes;
  6529. branch->u.b.veneer = newerr;
  6530. newerr->next = sec_data->erratumlist;
  6531. sec_data->erratumlist = newerr;
  6532. /* A symbol for the return from the veneer. */
  6533. sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
  6534. hash_table->num_vfp11_fixes);
  6535. myh = elf_link_hash_lookup
  6536. (&(hash_table)->root, tmp_name, false, false, false);
  6537. if (myh != NULL)
  6538. abort ();
  6539. bh = NULL;
  6540. val = offset + 4;
  6541. _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
  6542. branch_sec, val, NULL, true, false, &bh);
  6543. myh = (struct elf_link_hash_entry *) bh;
  6544. myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  6545. myh->forced_local = 1;
  6546. free (tmp_name);
  6547. /* Generate a mapping symbol for the veneer section, and explicitly add an
  6548. entry for that symbol to the code/data map for the section. */
  6549. if (hash_table->vfp11_erratum_glue_size == 0)
  6550. {
  6551. bh = NULL;
  6552. /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
  6553. ever requires this erratum fix. */
  6554. _bfd_generic_link_add_one_symbol (link_info,
  6555. hash_table->bfd_of_glue_owner, "$a",
  6556. BSF_LOCAL, s, 0, NULL,
  6557. true, false, &bh);
  6558. myh = (struct elf_link_hash_entry *) bh;
  6559. myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
  6560. myh->forced_local = 1;
  6561. /* The elf32_arm_init_maps function only cares about symbols from input
  6562. BFDs. We must make a note of this generated mapping symbol
  6563. ourselves so that code byteswapping works properly in
  6564. elf32_arm_write_section. */
  6565. elf32_arm_section_map_add (s, 'a', 0);
  6566. }
  6567. s->size += VFP11_ERRATUM_VENEER_SIZE;
  6568. hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
  6569. hash_table->num_vfp11_fixes++;
  6570. /* The offset of the veneer. */
  6571. return val;
  6572. }
/* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
   veneers need to be handled because used only in Cortex-M.

   Creates a local symbol naming the veneer for the branch BRANCH (located
   at OFFSET within BRANCH_SEC of BRANCH_BFD), records the veneer on the
   glue section's erratum list, reserves VENEER_SIZE bytes of glue space
   and defines a return-point symbol just past the veneered branch.  */

static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  /* All STM32L4XX veneers live in a single glue section owned by one BFD.  */
  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* +10 leaves room for the decimal fix number that sprintf substitutes
     into the veneer-name template below.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  /* The veneer symbol must not already exist.  */
  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, false, false, false);

  BFD_ASSERT (myh == NULL);

  /* Define a local function symbol at the end of the glue accumulated so
     far, i.e. at the start of this new veneer.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, true, false, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
    bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, false, false, false);

  if (myh != NULL)
    abort ();

  /* The return point is placed 4 bytes past the veneered branch.  */
  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, true, false, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					true, false, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  /* Account for the space this veneer occupies in the glue section.  */
  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  NOTE(review): at this point VAL holds the
     return-point offset (OFFSET + 4) set above, not the glue-section offset
     computed earlier — comment kept from the original; confirm intent.  */
  return val;
}
  6665. #define ARM_GLUE_SECTION_FLAGS \
  6666. (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
  6667. | SEC_READONLY | SEC_LINKER_CREATED)
  6668. /* Create a fake section for use by the ARM backend of the linker. */
  6669. static bool
  6670. arm_make_glue_section (bfd * abfd, const char * name)
  6671. {
  6672. asection * sec;
  6673. sec = bfd_get_linker_section (abfd, name);
  6674. if (sec != NULL)
  6675. /* Already made. */
  6676. return true;
  6677. sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
  6678. if (sec == NULL
  6679. || !bfd_set_section_alignment (sec, 2))
  6680. return false;
  6681. /* Set the gc mark to prevent the section from being removed by garbage
  6682. collection, despite the fact that no relocs refer to this section. */
  6683. sec->gc_mark = 1;
  6684. return true;
  6685. }
/* Set size of .plt entries.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  */

void
bfd_elf32_arm_use_long_plt (void)
{
  /* Global flag consulted when PLT contents are generated; selects the
     long-format PLT entry.  */
  elf32_arm_use_long_plt_entry = true;
}
  6693. /* Add the glue sections to ABFD. This function is called from the
  6694. linker scripts in ld/emultempl/{armelf}.em. */
  6695. bool
  6696. bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
  6697. struct bfd_link_info *info)
  6698. {
  6699. struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
  6700. bool dostm32l4xx = globals
  6701. && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
  6702. bool addglue;
  6703. /* If we are only performing a partial
  6704. link do not bother adding the glue. */
  6705. if (bfd_link_relocatable (info))
  6706. return true;
  6707. addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
  6708. && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
  6709. && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
  6710. && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
  6711. if (!dostm32l4xx)
  6712. return addglue;
  6713. return addglue
  6714. && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
  6715. }
  6716. /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
  6717. ensures they are not marked for deletion by
  6718. strip_excluded_output_sections () when veneers are going to be created
  6719. later. Not doing so would trigger assert on empty section size in
  6720. lang_size_sections_1 (). */
  6721. void
  6722. bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
  6723. {
  6724. enum elf32_arm_stub_type stub_type;
  6725. /* If we are only performing a partial
  6726. link do not bother adding the glue. */
  6727. if (bfd_link_relocatable (info))
  6728. return;
  6729. for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
  6730. {
  6731. asection *out_sec;
  6732. const char *out_sec_name;
  6733. if (!arm_dedicated_stub_output_section_required (stub_type))
  6734. continue;
  6735. out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
  6736. out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
  6737. if (out_sec != NULL)
  6738. out_sec->flags |= SEC_KEEP;
  6739. }
  6740. }
  6741. /* Select a BFD to be used to hold the sections used by the glue code.
  6742. This function is called from the linker scripts in ld/emultempl/
  6743. {armelf/pe}.em. */
  6744. bool
  6745. bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
  6746. {
  6747. struct elf32_arm_link_hash_table *globals;
  6748. /* If we are only performing a partial link
  6749. do not bother getting a bfd to hold the glue. */
  6750. if (bfd_link_relocatable (info))
  6751. return true;
  6752. /* Make sure we don't attach the glue sections to a dynamic object. */
  6753. BFD_ASSERT (!(abfd->flags & DYNAMIC));
  6754. globals = elf32_arm_hash_table (info);
  6755. BFD_ASSERT (globals != NULL);
  6756. if (globals->bfd_of_glue_owner != NULL)
  6757. return true;
  6758. /* Save the bfd for later use. */
  6759. globals->bfd_of_glue_owner = abfd;
  6760. return true;
  6761. }
  6762. static void
  6763. check_use_blx (struct elf32_arm_link_hash_table *globals)
  6764. {
  6765. int cpu_arch;
  6766. cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
  6767. Tag_CPU_arch);
  6768. if (globals->fix_arm1176)
  6769. {
  6770. if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
  6771. globals->use_blx = 1;
  6772. }
  6773. else
  6774. {
  6775. if (cpu_arch > TAG_CPU_ARCH_V4T)
  6776. globals->use_blx = 1;
  6777. }
  6778. }
/* Scan the relocations of every section of ABFD, before section sizes are
   fixed, and record any interworking glue (ARM-to-Thumb stubs, BX veneers)
   that will be needed.  Returns FALSE on error.  */

bool
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;
  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return true;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
			  abfd);
      return false;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return true;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return true;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, false);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.
	     R_ARM_V4BX is only interesting when fix_v4bx requests
	     replacement veneers (value 2).  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The low nibble of the BX instruction holds the register.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Free the per-section buffers, unless they are cached copies owned
	 by the section itself.  */
      if (elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return true;

error_return:
  if (elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return false;
}
  6903. #endif
  6904. /* Initialise maps of ARM/Thumb/data for input BFDs. */
  6905. void
  6906. bfd_elf32_arm_init_maps (bfd *abfd)
  6907. {
  6908. Elf_Internal_Sym *isymbuf;
  6909. Elf_Internal_Shdr *hdr;
  6910. unsigned int i, localsyms;
  6911. /* PR 7093: Make sure that we are dealing with an arm elf binary. */
  6912. if (! is_arm_elf (abfd))
  6913. return;
  6914. if ((abfd->flags & DYNAMIC) != 0)
  6915. return;
  6916. hdr = & elf_symtab_hdr (abfd);
  6917. localsyms = hdr->sh_info;
  6918. /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
  6919. should contain the number of local symbols, which should come before any
  6920. global symbols. Mapping symbols are always local. */
  6921. isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
  6922. NULL);
  6923. /* No internal symbols read? Skip this BFD. */
  6924. if (isymbuf == NULL)
  6925. return;
  6926. for (i = 0; i < localsyms; i++)
  6927. {
  6928. Elf_Internal_Sym *isym = &isymbuf[i];
  6929. asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
  6930. const char *name;
  6931. if (sec != NULL
  6932. && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
  6933. {
  6934. name = bfd_elf_string_from_elf_section (abfd,
  6935. hdr->sh_link, isym->st_name);
  6936. if (bfd_is_arm_special_symbol_name (name,
  6937. BFD_ARM_SPECIAL_SYM_TYPE_MAP))
  6938. elf32_arm_section_map_add (sec, name[1], isym->st_value);
  6939. }
  6940. }
  6941. }
  6942. /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
  6943. say what they wanted. */
  6944. void
  6945. bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
  6946. {
  6947. struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  6948. obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
  6949. if (globals == NULL)
  6950. return;
  6951. if (globals->fix_cortex_a8 == -1)
  6952. {
  6953. /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
  6954. if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
  6955. && (out_attr[Tag_CPU_arch_profile].i == 'A'
  6956. || out_attr[Tag_CPU_arch_profile].i == 0))
  6957. globals->fix_cortex_a8 = 1;
  6958. else
  6959. globals->fix_cortex_a8 = 0;
  6960. }
  6961. }
  6962. void
  6963. bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
  6964. {
  6965. struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  6966. obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
  6967. if (globals == NULL)
  6968. return;
  6969. /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
  6970. if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
  6971. {
  6972. switch (globals->vfp11_fix)
  6973. {
  6974. case BFD_ARM_VFP11_FIX_DEFAULT:
  6975. case BFD_ARM_VFP11_FIX_NONE:
  6976. globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  6977. break;
  6978. default:
  6979. /* Give a warning, but do as the user requests anyway. */
  6980. _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
  6981. "workaround is not necessary for target architecture"), obfd);
  6982. }
  6983. }
  6984. else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
  6985. /* For earlier architectures, we might need the workaround, but do not
  6986. enable it by default. If users is running with broken hardware, they
  6987. must enable the erratum fix explicitly. */
  6988. globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  6989. }
  6990. void
  6991. bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
  6992. {
  6993. struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  6994. obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
  6995. if (globals == NULL)
  6996. return;
  6997. /* We assume only Cortex-M4 may require the fix. */
  6998. if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
  6999. || out_attr[Tag_CPU_arch_profile].i != 'M')
  7000. {
  7001. if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
  7002. /* Give a warning, but do as the user requests anyway. */
  7003. _bfd_error_handler
  7004. (_("%pB: warning: selected STM32L4XX erratum "
  7005. "workaround is not necessary for target architecture"), obfd);
  7006. }
  7007. }
/* Classification of the VFP11 pipeline used by an instruction, as determined
   by bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline.  */
  VFP11_LS,	/* Load/store (and register transfer) pipeline.  */
  VFP11_DS,	/* Divide/square-root pipeline.  */
  VFP11_BAD	/* Not a recognised VFP11 instruction.  */
};
  7015. /* Return a VFP register number. This is encoded as RX:X for single-precision
  7016. registers, or X:RX for double-precision registers, where RX is the group of
  7017. four bits in the instruction encoding and X is the single extension bit.
  7018. RX and X fields are specified using their lowest (starting) bit. The return
  7019. value is:
  7020. 0...31: single-precision registers s0...s31
  7021. 32...63: double-precision registers d0...d31.
  7022. Although X should be zero for VFP11 (encoding d0...d15 only), we might
  7023. encounter VFP3 instructions, so we allow the full range for DP registers. */
  7024. static unsigned int
  7025. bfd_arm_vfp11_regno (unsigned int insn, bool is_double, unsigned int rx,
  7026. unsigned int x)
  7027. {
  7028. if (is_double)
  7029. return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
  7030. else
  7031. return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
  7032. }
  7033. /* Set bits in *WMASK according to a register number REG as encoded by
  7034. bfd_arm_vfp11_regno(). Ignore d16-d31. */
  7035. static void
  7036. bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
  7037. {
  7038. if (reg < 32)
  7039. *wmask |= 1 << reg;
  7040. else if (reg < 48)
  7041. *wmask |= 3 << ((reg - 32) * 2);
  7042. }
  7043. /* Return TRUE if WMASK overwrites anything in REGS. */
  7044. static bool
  7045. bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
  7046. {
  7047. int i;
  7048. for (i = 0; i < numregs; i++)
  7049. {
  7050. unsigned int reg = regs[i];
  7051. if (reg < 32 && (wmask & (1 << reg)) != 0)
  7052. return true;
  7053. reg -= 32;
  7054. if (reg >= 16)
  7055. continue;
  7056. if ((wmask & (3 << (reg * 2))) != 0)
  7057. return true;
  7058. }
  7059. return false;
  7060. }
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 dp registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both SP registers in the write mask).

   INSN is the (ARM-mode) instruction word.  Written registers are OR-ed
   into *DESTMASK; the input registers relevant to the denorm erratum are
   stored in REGS[0..*NUMREGS-1].  Returns the pipeline classification, or
   VFP11_BAD if the instruction is not decoded.  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 8-11 equal to 0xb select the double-precision form.  */
  bool is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p, q, r, s opcode bits selecting the operation.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  /* Multiply-accumulate: reads Fd, Fn, Fm; writes Fd.  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;

	vfp_binop:
	  /* Binary operation: reads Fn, Fm; writes Fd.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* L bit clear: transfer from core registers into VFP registers.  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      /* SP pair form writes Fm and the next SP register.  */
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Load-multiple writes OFFSET consecutive registers from Fd.  */
	    unsigned int i, offset = insn & 0xff;

	    /* The immediate counts words; halve it for DP registers.  */
	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer.  Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
  7223. static int elf32_arm_compare_mapping (const void * a, const void * b);
  7224. /* Look for potentially-troublesome code sequences which might trigger the
  7225. VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
  7226. (available from ARM) for details of the erratum. A short version is
  7227. described in ld.texinfo. */
  7228. bool
  7229. bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
  7230. {
  7231. asection *sec;
  7232. bfd_byte *contents = NULL;
  7233. int state = 0;
  7234. int regs[3], numregs = 0;
  7235. struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  7236. int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
  7237. if (globals == NULL)
  7238. return false;
  7239. /* We use a simple FSM to match troublesome VFP11 instruction sequences.
  7240. The states transition as follows:
  7241. 0 -> 1 (vector) or 0 -> 2 (scalar)
  7242. A VFP FMAC-pipeline instruction has been seen. Fill
  7243. regs[0]..regs[numregs-1] with its input operands. Remember this
  7244. instruction in 'first_fmac'.
  7245. 1 -> 2
  7246. Any instruction, except for a VFP instruction which overwrites
  7247. regs[*].
  7248. 1 -> 3 [ -> 0 ] or
  7249. 2 -> 3 [ -> 0 ]
  7250. A VFP instruction has been seen which overwrites any of regs[*].
  7251. We must make a veneer! Reset state to 0 before examining next
  7252. instruction.
  7253. 2 -> 0
  7254. If we fail to match anything in state 2, reset to state 0 and reset
  7255. the instruction pointer to the instruction after 'first_fmac'.
  7256. If the VFP11 vector mode is in use, there must be at least two unrelated
  7257. instructions between anti-dependent VFP11 instructions to properly avoid
  7258. triggering the erratum, hence the use of the extra state 1. */
  7259. /* If we are only performing a partial link do not bother
  7260. to construct any glue. */
  7261. if (bfd_link_relocatable (link_info))
  7262. return true;
  7263. /* Skip if this bfd does not correspond to an ELF image. */
  7264. if (! is_arm_elf (abfd))
  7265. return true;
  7266. /* We should have chosen a fix type by the time we get here. */
  7267. BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
  7268. if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
  7269. return true;
  7270. /* Skip this BFD if it corresponds to an executable or dynamic object. */
  7271. if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
  7272. return true;
  7273. for (sec = abfd->sections; sec != NULL; sec = sec->next)
  7274. {
  7275. unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
  7276. struct _arm_elf_section_data *sec_data;
  7277. /* If we don't have executable progbits, we're not interested in this
  7278. section. Also skip if section is to be excluded. */
  7279. if (elf_section_type (sec) != SHT_PROGBITS
  7280. || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
  7281. || (sec->flags & SEC_EXCLUDE) != 0
  7282. || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
  7283. || sec->output_section == bfd_abs_section_ptr
  7284. || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
  7285. continue;
  7286. sec_data = elf32_arm_section_data (sec);
  7287. if (sec_data->mapcount == 0)
  7288. continue;
  7289. if (elf_section_data (sec)->this_hdr.contents != NULL)
  7290. contents = elf_section_data (sec)->this_hdr.contents;
  7291. else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
  7292. goto error_return;
  7293. qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
  7294. elf32_arm_compare_mapping);
  7295. for (span = 0; span < sec_data->mapcount; span++)
  7296. {
  7297. unsigned int span_start = sec_data->map[span].vma;
  7298. unsigned int span_end = (span == sec_data->mapcount - 1)
  7299. ? sec->size : sec_data->map[span + 1].vma;
  7300. char span_type = sec_data->map[span].type;
  7301. /* FIXME: Only ARM mode is supported at present. We may need to
  7302. support Thumb-2 mode also at some point. */
  7303. if (span_type != 'a')
  7304. continue;
  7305. for (i = span_start; i < span_end;)
  7306. {
  7307. unsigned int next_i = i + 4;
  7308. unsigned int insn = bfd_big_endian (abfd)
  7309. ? (((unsigned) contents[i] << 24)
  7310. | (contents[i + 1] << 16)
  7311. | (contents[i + 2] << 8)
  7312. | contents[i + 3])
  7313. : (((unsigned) contents[i + 3] << 24)
  7314. | (contents[i + 2] << 16)
  7315. | (contents[i + 1] << 8)
  7316. | contents[i]);
  7317. unsigned int writemask = 0;
  7318. enum bfd_arm_vfp11_pipe vpipe;
  7319. switch (state)
  7320. {
  7321. case 0:
  7322. vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
  7323. &numregs);
  7324. /* I'm assuming the VFP11 erratum can trigger with denorm
  7325. operands on either the FMAC or the DS pipeline. This might
  7326. lead to slightly overenthusiastic veneer insertion. */
  7327. if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
  7328. {
  7329. state = use_vector ? 1 : 2;
  7330. first_fmac = i;
  7331. veneer_of_insn = insn;
  7332. }
  7333. break;
  7334. case 1:
  7335. {
  7336. int other_regs[3], other_numregs;
  7337. vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
  7338. other_regs,
  7339. &other_numregs);
  7340. if (vpipe != VFP11_BAD
  7341. && bfd_arm_vfp11_antidependency (writemask, regs,
  7342. numregs))
  7343. state = 3;
  7344. else
  7345. state = 2;
  7346. }
  7347. break;
  7348. case 2:
  7349. {
  7350. int other_regs[3], other_numregs;
  7351. vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
  7352. other_regs,
  7353. &other_numregs);
  7354. if (vpipe != VFP11_BAD
  7355. && bfd_arm_vfp11_antidependency (writemask, regs,
  7356. numregs))
  7357. state = 3;
  7358. else
  7359. {
  7360. state = 0;
  7361. next_i = first_fmac + 4;
  7362. }
  7363. }
  7364. break;
  7365. case 3:
  7366. abort (); /* Should be unreachable. */
  7367. }
  7368. if (state == 3)
  7369. {
  7370. elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
  7371. bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
  7372. elf32_arm_section_data (sec)->erratumcount += 1;
  7373. newerr->u.b.vfp_insn = veneer_of_insn;
  7374. switch (span_type)
  7375. {
  7376. case 'a':
  7377. newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
  7378. break;
  7379. default:
  7380. abort ();
  7381. }
  7382. record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
  7383. first_fmac);
  7384. newerr->vma = -1;
  7385. newerr->next = sec_data->erratumlist;
  7386. sec_data->erratumlist = newerr;
  7387. state = 0;
  7388. }
  7389. i = next_i;
  7390. }
  7391. }
  7392. if (elf_section_data (sec)->this_hdr.contents != contents)
  7393. free (contents);
  7394. contents = NULL;
  7395. }
  7396. return true;
  7397. error_return:
  7398. if (elf_section_data (sec)->this_hdr.contents != contents)
  7399. free (contents);
  7400. return false;
  7401. }
  7402. /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
  7403. after sections have been laid out, using specially-named symbols. */
  7404. void
  7405. bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
  7406. struct bfd_link_info *link_info)
  7407. {
  7408. asection *sec;
  7409. struct elf32_arm_link_hash_table *globals;
  7410. char *tmp_name;
  7411. if (bfd_link_relocatable (link_info))
  7412. return;
  7413. /* Skip if this bfd does not correspond to an ELF image. */
  7414. if (! is_arm_elf (abfd))
  7415. return;
  7416. globals = elf32_arm_hash_table (link_info);
  7417. if (globals == NULL)
  7418. return;
  7419. tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
  7420. (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
  7421. BFD_ASSERT (tmp_name);
  7422. for (sec = abfd->sections; sec != NULL; sec = sec->next)
  7423. {
  7424. struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
  7425. elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
  7426. for (; errnode != NULL; errnode = errnode->next)
  7427. {
  7428. struct elf_link_hash_entry *myh;
  7429. bfd_vma vma;
  7430. switch (errnode->type)
  7431. {
  7432. case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
  7433. case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
  7434. /* Find veneer symbol. */
  7435. sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
  7436. errnode->u.b.veneer->u.v.id);
  7437. myh = elf_link_hash_lookup
  7438. (&(globals)->root, tmp_name, false, false, true);
  7439. if (myh == NULL)
  7440. _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
  7441. abfd, "VFP11", tmp_name);
  7442. vma = myh->root.u.def.section->output_section->vma
  7443. + myh->root.u.def.section->output_offset
  7444. + myh->root.u.def.value;
  7445. errnode->u.b.veneer->vma = vma;
  7446. break;
  7447. case VFP11_ERRATUM_ARM_VENEER:
  7448. case VFP11_ERRATUM_THUMB_VENEER:
  7449. /* Find return location. */
  7450. sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
  7451. errnode->u.v.id);
  7452. myh = elf_link_hash_lookup
  7453. (&(globals)->root, tmp_name, false, false, true);
  7454. if (myh == NULL)
  7455. _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
  7456. abfd, "VFP11", tmp_name);
  7457. vma = myh->root.u.def.section->output_section->vma
  7458. + myh->root.u.def.section->output_offset
  7459. + myh->root.u.def.value;
  7460. errnode->u.v.branch->vma = vma;
  7461. break;
  7462. default:
  7463. abort ();
  7464. }
  7465. }
  7466. }
  7467. free (tmp_name);
  7468. }
  7469. /* Find virtual-memory addresses for STM32L4XX erratum veneers and
  7470. return locations after sections have been laid out, using
  7471. specially-named symbols. */
  7472. void
  7473. bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
  7474. struct bfd_link_info *link_info)
  7475. {
  7476. asection *sec;
  7477. struct elf32_arm_link_hash_table *globals;
  7478. char *tmp_name;
  7479. if (bfd_link_relocatable (link_info))
  7480. return;
  7481. /* Skip if this bfd does not correspond to an ELF image. */
  7482. if (! is_arm_elf (abfd))
  7483. return;
  7484. globals = elf32_arm_hash_table (link_info);
  7485. if (globals == NULL)
  7486. return;
  7487. tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
  7488. (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
  7489. BFD_ASSERT (tmp_name);
  7490. for (sec = abfd->sections; sec != NULL; sec = sec->next)
  7491. {
  7492. struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
  7493. elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
  7494. for (; errnode != NULL; errnode = errnode->next)
  7495. {
  7496. struct elf_link_hash_entry *myh;
  7497. bfd_vma vma;
  7498. switch (errnode->type)
  7499. {
  7500. case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
  7501. /* Find veneer symbol. */
  7502. sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
  7503. errnode->u.b.veneer->u.v.id);
  7504. myh = elf_link_hash_lookup
  7505. (&(globals)->root, tmp_name, false, false, true);
  7506. if (myh == NULL)
  7507. _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
  7508. abfd, "STM32L4XX", tmp_name);
  7509. vma = myh->root.u.def.section->output_section->vma
  7510. + myh->root.u.def.section->output_offset
  7511. + myh->root.u.def.value;
  7512. errnode->u.b.veneer->vma = vma;
  7513. break;
  7514. case STM32L4XX_ERRATUM_VENEER:
  7515. /* Find return location. */
  7516. sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
  7517. errnode->u.v.id);
  7518. myh = elf_link_hash_lookup
  7519. (&(globals)->root, tmp_name, false, false, true);
  7520. if (myh == NULL)
  7521. _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
  7522. abfd, "STM32L4XX", tmp_name);
  7523. vma = myh->root.u.def.section->output_section->vma
  7524. + myh->root.u.def.section->output_offset
  7525. + myh->root.u.def.value;
  7526. errnode->u.v.branch->vma = vma;
  7527. break;
  7528. default:
  7529. abort ();
  7530. }
  7531. }
  7532. }
  7533. free (tmp_name);
  7534. }
  7535. static inline bool
  7536. is_thumb2_ldmia (const insn32 insn)
  7537. {
  7538. /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
  7539. 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
  7540. return (insn & 0xffd02000) == 0xe8900000;
  7541. }
  7542. static inline bool
  7543. is_thumb2_ldmdb (const insn32 insn)
  7544. {
  7545. /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
  7546. 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
  7547. return (insn & 0xffd02000) == 0xe9100000;
  7548. }
  7549. static inline bool
  7550. is_thumb2_vldm (const insn32 insn)
  7551. {
  7552. /* A6.5 Extension register load or store instruction
  7553. A7.7.229
  7554. We look for SP 32-bit and DP 64-bit registers.
  7555. Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
  7556. <list> is consecutive 64-bit registers
  7557. 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
  7558. Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
  7559. <list> is consecutive 32-bit registers
  7560. 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
  7561. if P==0 && U==1 && W==1 && Rn=1101 VPOP
  7562. if PUW=010 || PUW=011 || PUW=101 VLDM. */
  7563. return
  7564. (((insn & 0xfe100f00) == 0xec100b00) ||
  7565. ((insn & 0xfe100f00) == 0xec100a00))
  7566. && /* (IA without !). */
  7567. (((((insn << 7) >> 28) & 0xd) == 0x4)
  7568. /* (IA with !), includes VPOP (when reg number is SP). */
  7569. || ((((insn << 7) >> 28) & 0xd) == 0x5)
  7570. /* (DB with !). */
  7571. || ((((insn << 7) >> 28) & 0xd) == 0x9));
  7572. }
  7573. /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
  7574. VLDM opcode and:
  7575. - computes the number and the mode of memory accesses
  7576. - decides if the replacement should be done:
  7577. . replaces only if > 8-word accesses
  7578. . or (testing purposes only) replaces all accesses. */
  7579. static bool
  7580. stm32l4xx_need_create_replacing_stub (const insn32 insn,
  7581. bfd_arm_stm32l4xx_fix stm32l4xx_fix)
  7582. {
  7583. int nb_words = 0;
  7584. /* The field encoding the register list is the same for both LDMIA
  7585. and LDMDB encodings. */
  7586. if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
  7587. nb_words = elf32_arm_popcount (insn & 0x0000ffff);
  7588. else if (is_thumb2_vldm (insn))
  7589. nb_words = (insn & 0xff);
  7590. /* DEFAULT mode accounts for the real bug condition situation,
  7591. ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
  7592. return (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT
  7593. ? nb_words > 8
  7594. : stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL);
  7595. }
  7596. /* Look for potentially-troublesome code sequences which might trigger
  7597. the STM STM32L4XX erratum. */
bool
bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
				      struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);

  if (globals == NULL)
    return false;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return true;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return true;

  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
    return true;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return true;

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      /* No mapping symbols means no code spans to scan.  */
      if (sec_data->mapcount == 0)
	continue;

      /* Reuse cached section contents when available, otherwise read
	 them in (and free them again at the bottom of the loop).  */
      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      /* Sort the mapping symbols by address so consecutive entries
	 delimit spans of a single state (ARM/Thumb/data).  */
      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  /* Number of instructions still covered by the current IT
	     block; zero when outside any IT block.  */
	  int itblock_current_pos = 0;

	  /* Only Thumb2 mode need be supported with this CM4 specific
	     code, we should not encounter any arm mode eg span_type
	     != 'a'.  */
	  if (span_type != 't')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
	      bool insn_32bit = false;
	      bool is_ldm = false;
	      bool is_vldm = false;
	      bool is_not_last_in_it_block = false;

	      /* The first 16-bits of all 32-bit thumb2 instructions start
		 with opcode[15..13]=0b111 and the encoded op1 can be anything
		 except opcode[12..11]!=0b00.
		 See 32-bit Thumb instruction encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = true;

	      /* Compute the predicate that tells if the instruction
		 is concerned by the IT block
		 - Creates an error if there is a ldm that is not
		   last in the IT block thus cannot be replaced
		 - Otherwise we can create a branch at the end of the
		   IT block, it will be controlled naturally by IT
		   with the proper pseudo-predicate
		 - So the only interesting predicate is the one that
		   tells that we are not on the last item of an IT
		   block.  */
	      if (itblock_current_pos != 0)
		is_not_last_in_it_block = !!--itblock_current_pos;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
		  is_vldm = is_thumb2_vldm (insn);

		  /* Veneers are created for (v)ldm depending on
		     option flags and memory accesses conditions; but
		     if the instruction is not the last instruction of
		     an IT block, we cannot create a jump there, so we
		     bail out.  */
		  if ((is_ldm || is_vldm)
		      && stm32l4xx_need_create_replacing_stub
		      (insn, globals->stm32l4xx_fix))
		    {
		      if (is_not_last_in_it_block)
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#x): error: multiple load detected"
			       " in non-last IT block instruction:"
			       " STM32L4XX veneer cannot be generated; "
			       "use gcc option -mrestrict-it to generate"
			       " only one instruction per IT block"),
			     abfd, sec, i);
			}
		      else
			{
			  /* Record an erratum site: the veneer itself is
			     emitted later; vma is fixed up after layout by
			     bfd_elf32_arm_stm32l4xx_fix_veneer_locations.  */
			  elf32_stm32l4xx_erratum_list *newerr =
			    (elf32_stm32l4xx_erratum_list *)
			    bfd_zmalloc
			    (sizeof (elf32_stm32l4xx_erratum_list));

			  elf32_arm_section_data (sec)
			    ->stm32l4xx_erratumcount += 1;
			  newerr->u.b.insn = insn;
			  /* We create only thumb branches.  */
			  newerr->type =
			    STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
			  record_stm32l4xx_erratum_veneer
			    (link_info, newerr, abfd, sec,
			     i,
			     is_ldm ?
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
			  newerr->vma = -1;
			  newerr->next = sec_data->stm32l4xx_erratumlist;
			  sec_data->stm32l4xx_erratumlist = newerr;
			}
		    }
		}
	      else
		{
		  /* A7.7.37 IT p208
		     IT blocks are only encoded in T1
		     Encoding T1: IT{x{y{z}}} <firstcond>
		     1 0 1 1 - 1 1 1 1 - firstcond - mask
		     if mask = '0000' then see 'related encodings'
		     We don't deal with UNPREDICTABLE, just ignore these.
		     There can be no nested IT blocks so an IT block
		     is naturally a new one for which it is worth
		     computing its size.  */
		  bool is_newitblock = ((insn & 0xff00) == 0xbf00)
		    && ((insn & 0x000f) != 0x0000);

		  /* If we have a new IT block we compute its size.  */
		  if (is_newitblock)
		    {
		      /* Compute the number of instructions controlled
			 by the IT block, it will be used to decide
			 whether we are inside an IT block or not.  */
		      unsigned int mask = insn & 0x000f;
		      itblock_current_pos = 4 - ctz (mask);
		    }
		}

	      i += insn_32bit ? 4 : 2;
	    }
	}

      if (elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return true;

 error_return:
  if (elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  return false;
}
  7764. /* Set target relocation values needed during linking. */
  7765. void
  7766. bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
  7767. struct bfd_link_info *link_info,
  7768. struct elf32_arm_params *params)
  7769. {
  7770. struct elf32_arm_link_hash_table *globals;
  7771. globals = elf32_arm_hash_table (link_info);
  7772. if (globals == NULL)
  7773. return;
  7774. globals->target1_is_rel = params->target1_is_rel;
  7775. if (globals->fdpic_p)
  7776. globals->target2_reloc = R_ARM_GOT32;
  7777. else if (strcmp (params->target2_type, "rel") == 0)
  7778. globals->target2_reloc = R_ARM_REL32;
  7779. else if (strcmp (params->target2_type, "abs") == 0)
  7780. globals->target2_reloc = R_ARM_ABS32;
  7781. else if (strcmp (params->target2_type, "got-rel") == 0)
  7782. globals->target2_reloc = R_ARM_GOT_PREL;
  7783. else
  7784. {
  7785. _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
  7786. params->target2_type);
  7787. }
  7788. globals->fix_v4bx = params->fix_v4bx;
  7789. globals->use_blx |= params->use_blx;
  7790. globals->vfp11_fix = params->vfp11_denorm_fix;
  7791. globals->stm32l4xx_fix = params->stm32l4xx_fix;
  7792. if (globals->fdpic_p)
  7793. globals->pic_veneer = 1;
  7794. else
  7795. globals->pic_veneer = params->pic_veneer;
  7796. globals->fix_cortex_a8 = params->fix_cortex_a8;
  7797. globals->fix_arm1176 = params->fix_arm1176;
  7798. globals->cmse_implib = params->cmse_implib;
  7799. globals->in_implib_bfd = params->in_implib_bfd;
  7800. BFD_ASSERT (is_arm_elf (output_bfd));
  7801. elf_arm_tdata (output_bfd)->no_enum_size_warning
  7802. = params->no_enum_size_warning;
  7803. elf_arm_tdata (output_bfd)->no_wchar_size_warning
  7804. = params->no_wchar_size_warning;
  7805. }
  7806. /* Replace the target offset of a Thumb bl or b.w instruction. */
static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;	/* First halfword of the branch instruction.  */
  bfd_vma lower;	/* Second halfword of the branch instruction.  */
  int reloc_sign;

  /* Branch targets are halfword-aligned; bit 0 must be clear.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* Upper halfword: keep the opcode bits, insert offset bits [21:12]
     and the sign bit (S) at bit 10.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Lower halfword: insert offset bits [11:1] and the J1 (bit 13) and
     J2 (bit 11) fields, each formed as NOT(offset bit) XOR S per the
     Thumb-2 BL/B.W encoding; bit 12 of the halfword is preserved.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
  7827. /* Thumb code calling an ARM function. */
static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol created earlier for this target NAME.  */
  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return false;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 0 of the glue symbol's value flags a stub whose contents have
     not been written yet; clear it and emit the stub instructions.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");

	  return false;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Stub body: Thumb "bx pc" + nop to switch to ARM state, then an
	 ARM branch to the real destination.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return true;
}
  7910. /* Populate an Arm to Thumb stub. Returns the stub symbol. */
static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol created earlier for this target NAME.  */
  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* Bit 0 of the glue symbol's value flags a stub whose contents have
     not been written yet; clear it and emit the stub instructions.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  /* v5T and later: a single ldr pc loads the (Thumb) address,
	     which performs the state change itself.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  /* v4T: load the address into ip and bx to it.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
  7991. /* Arm code calling a Thumb function. */
static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Create (or reuse) the ARM-to-Thumb glue stub for NAME.  */
  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return false;

  my_offset = myh->root.u.def.value;
  tmp = bfd_get_32 (input_bfd, hit_data);
  /* Keep the condition/opcode byte of the original branch; the 24-bit
     offset field is recomputed below.  */
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return true;
}
  8038. /* Populate Arm stub for an exported Thumb function. */
static bool
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return true;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Final (output) address of the exported Thumb function.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return true;
}
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
static bfd_vma
elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
{
  bfd_byte *p;
  bfd_vma glue_addr;
  asection *s;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM_BX_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 1 of bx_glue_offset marks a veneer that has been allocated for
     this register; bit 0 (checked below) marks one already populated.  */
  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);

  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;

  if ((globals->bx_glue_offset[reg] & 1) == 0)
    {
      p = s->contents + glue_addr;

      /* Veneer body: tst rREG, #1; moveq pc, rREG; bx rREG — emulates
	 BX on processors without it by checking the Thumb bit.  */
      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);

      /* Mark the veneer as populated so it is only written once.  */
      globals->bx_glue_offset[reg] |= 1;
    }

  return glue_addr + s->output_section->vma + s->output_offset;
}
  8101. /* Generate Arm stubs for exported Thumb symbols. */
  8102. static void
  8103. elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
  8104. struct bfd_link_info *link_info)
  8105. {
  8106. struct elf32_arm_link_hash_table * globals;
  8107. if (link_info == NULL)
  8108. /* Ignore this if we are not called by the ELF backend linker. */
  8109. return;
  8110. globals = elf32_arm_hash_table (link_info);
  8111. if (globals == NULL)
  8112. return;
  8113. /* If blx is available then exported Thumb symbols are OK and there is
  8114. nothing to do. */
  8115. if (globals->use_blx)
  8116. return;
  8117. elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
  8118. link_info);
  8119. }
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */
  8122. static void
  8123. elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
  8124. bfd_size_type count)
  8125. {
  8126. struct elf32_arm_link_hash_table *htab;
  8127. htab = elf32_arm_hash_table (info);
  8128. BFD_ASSERT (htab->root.dynamic_sections_created);
  8129. if (sreloc == NULL)
  8130. abort ();
  8131. sreloc->size += RELOC_SIZE (htab) * count;
  8132. }
  8133. /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
  8134. dynamic, the relocations should go in SRELOC, otherwise they should
  8135. go in the special .rel.iplt section. */
  8136. static void
  8137. elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
  8138. bfd_size_type count)
  8139. {
  8140. struct elf32_arm_link_hash_table *htab;
  8141. htab = elf32_arm_hash_table (info);
  8142. if (!htab->root.dynamic_sections_created)
  8143. htab->root.irelplt->size += RELOC_SIZE (htab) * count;
  8144. else
  8145. {
  8146. BFD_ASSERT (sreloc != NULL);
  8147. sreloc->size += RELOC_SIZE (htab) * count;
  8148. }
  8149. }
  8150. /* Add relocation REL to the end of relocation section SRELOC. */
  8151. static void
  8152. elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
  8153. asection *sreloc, Elf_Internal_Rela *rel)
  8154. {
  8155. bfd_byte *loc;
  8156. struct elf32_arm_link_hash_table *htab;
  8157. htab = elf32_arm_hash_table (info);
  8158. if (!htab->root.dynamic_sections_created
  8159. && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
  8160. sreloc = htab->root.irelplt;
  8161. if (sreloc == NULL)
  8162. abort ();
  8163. loc = sreloc->contents;
  8164. loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
  8165. if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
  8166. abort ();
  8167. SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
  8168. }
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
   to .plt.  */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bool is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->root.target_os == is_nacl && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      if (htab->fdpic_p)
	{
	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
	  /* For lazy binding, relocations will be put into .rel.plt, in
	     .rel.got otherwise.  */
	  /* FIXME: today we don't support lazy binding so put it in
	     .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}
      else
	{
	  /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  /* Record the entry's offset before growing the section past it.  */
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  /* We also need to make an entry in the .got.plt section, which
     will be placed in the .got section by the linker script.  */
  if (is_iplt_entry)
    arm_plt->got_offset = sgotplt->size;
  else
    /* NOTE(review): appears to skip over 8-byte TLS-descriptor slots
       accounted in num_tls_desc — confirm against the TLS_DESC GOT
       layout elsewhere in this file.  */
    arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
  if (htab->fdpic_p)
    /* Function descriptor takes 64 bits in GOT.  */
    sgotplt->size += 8;
  else
    sgotplt->size += 4;
}
  8235. static bfd_vma
  8236. arm_movw_immediate (bfd_vma value)
  8237. {
  8238. return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
  8239. }
  8240. static bfd_vma
  8241. arm_movt_immediate (bfd_vma value)
  8242. {
  8243. return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
  8244. }
/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   Returns FALSE if there was a problem.  */

static bool
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      /* IFUNC entry: use the .iplt/.igot.plt/.rel.iplt triple.  */
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  bfd_vma got_offset, got_address, plt_address;
  bfd_vma got_displacement, initial_got_entry;
  bfd_byte * ptr;

  BFD_ASSERT (sgot != NULL);

  /* Get the offset into the .(i)got.plt table of the entry that
     corresponds to this function.  The low bit of got_offset is used
     as an "already populated" flag elsewhere, so mask it off here.  */
  got_offset = (arm_plt->got_offset & -2);

  /* Get the index in the procedure linkage table which
     corresponds to this symbol.  This is the index of this symbol
     in all the symbols for which we are making plt entries.
     After the reserved .got.plt entries, all symbols appear in
     the same order as in .plt.  */
  if (htab->fdpic_p)
    /* Function descriptor takes 8 bytes.  */
    plt_index = (got_offset - got_header_size) / 8;
  else
    plt_index = (got_offset - got_header_size) / 4;

  /* Calculate the address of the GOT entry.  */
  got_address = (sgot->output_section->vma
		 + sgot->output_offset
		 + got_offset);

  /* ...and the address of the PLT entry.  */
  plt_address = (splt->output_section->vma
		 + splt->output_offset
		 + root_plt->offset);

  ptr = splt->contents + root_plt->offset;
  if (htab->root.target_os == is_vxworks && bfd_link_pic (info))
    {
      /* VxWorks shared object: patch the GOT offset into word 2 and
	 the .rel.plt byte offset into word 5 of the template.  */
      unsigned int i;
      bfd_vma val;

      for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	{
	  val = elf32_arm_vxworks_shared_plt_entry[i];
	  if (i == 2)
	    val |= got_address - sgot->output_section->vma;
	  if (i == 5)
	    val |= plt_index * RELOC_SIZE (htab);
	  /* Words 2 and 5 are data, not instructions.  */
	  if (i == 2 || i == 5)
	    bfd_put_32 (output_bfd, val, ptr);
	  else
	    put_arm_insn (htab, output_bfd, val, ptr);
	}
    }
  else if (htab->root.target_os == is_vxworks)
    {
      /* VxWorks executable: similar template, but word 2 holds the
	 absolute GOT address and word 4 a backwards branch.  */
      unsigned int i;
      bfd_vma val;

      for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	{
	  val = elf32_arm_vxworks_exec_plt_entry[i];
	  if (i == 2)
	    val |= got_address;
	  if (i == 4)
	    /* 24-bit branch displacement back to the start of .plt.  */
	    val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	  if (i == 5)
	    val |= plt_index * RELOC_SIZE (htab);
	  if (i == 2 || i == 5)
	    bfd_put_32 (output_bfd, val, ptr);
	  else
	    put_arm_insn (htab, output_bfd, val, ptr);
	}

      loc = (htab->srelplt2->contents
	     + (plt_index * 2 + 1) * RELOC_SIZE (htab));

      /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	 referencing the GOT for this PLT entry.  */
      rel.r_offset = plt_address + 8;
      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
      rel.r_addend = got_offset;
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
      loc += RELOC_SIZE (htab);

      /* Create the R_ARM_ABS32 relocation referencing the
	 beginning of the PLT for this GOT entry.  */
      rel.r_offset = got_address;
      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
      rel.r_addend = 0;
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }
  else if (htab->root.target_os == is_nacl)
    {
      /* Calculate the displacement between the PLT slot and the
	 common tail that's part of the special initial PLT slot.  */
      int32_t tail_displacement
	= ((splt->output_section->vma + splt->output_offset
	    + ARM_NACL_PLT_TAIL_OFFSET)
	   - (plt_address + htab->plt_entry_size + 4));
      BFD_ASSERT ((tail_displacement & 3) == 0);
      tail_displacement >>= 2;

      /* The displacement must fit in a 24-bit branch field.  */
      BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		  || (-tail_displacement & 0xff000000) == 0);

      /* Calculate the displacement between the PLT slot and the entry
	 in the GOT.  The offset accounts for the value produced by
	 adding to pc in the penultimate instruction of the PLT stub.  */
      got_displacement = (got_address
			  - (plt_address + htab->plt_entry_size));

      /* NaCl does not support interworking at all.  */
      BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

      /* movw/movt load the GOT displacement, then the common tail
	 performs the indirect jump.  */
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[0]
		    | arm_movw_immediate (got_displacement),
		    ptr + 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[1]
		    | arm_movt_immediate (got_displacement),
		    ptr + 4);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[2],
		    ptr + 8);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[3]
		    | (tail_displacement & 0x00ffffff),
		    ptr + 12);
    }
  else if (htab->fdpic_p)
    {
      const bfd_vma *plt_entry = using_thumb_only (htab)
	? elf32_arm_fdpic_thumb_plt_entry
	: elf32_arm_fdpic_plt_entry;

      /* Fill-up Thumb stub if needed.  */
      if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	{
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[0], ptr - 4);
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[1], ptr - 2);
	}
      /* As we are using 32 bit instructions even for the Thumb
	 version, we have to use 'put_arm_insn' instead of
	 'put_thumb_insn'.  */
      put_arm_insn (htab, output_bfd, plt_entry[0], ptr + 0);
      put_arm_insn (htab, output_bfd, plt_entry[1], ptr + 4);
      put_arm_insn (htab, output_bfd, plt_entry[2], ptr + 8);
      put_arm_insn (htab, output_bfd, plt_entry[3], ptr + 12);
      bfd_put_32 (output_bfd, got_offset, ptr + 16);

      if (!(info->flags & DF_BIND_NOW))
	{
	  /* funcdesc_value_reloc_offset: byte offset of the pending
	     R_ARM_FUNCDESC_VALUE entry within .rel.plt, stored in the
	     PLT slot for the lazy resolver.  */
	  bfd_put_32 (output_bfd,
		      htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
		      ptr + 20);
	  put_arm_insn (htab, output_bfd, plt_entry[6], ptr + 24);
	  put_arm_insn (htab, output_bfd, plt_entry[7], ptr + 28);
	  put_arm_insn (htab, output_bfd, plt_entry[8], ptr + 32);
	  put_arm_insn (htab, output_bfd, plt_entry[9], ptr + 36);
	}
    }
  else if (using_thumb_only (htab))
    {
      /* PR ld/16017: Generate thumb only PLT entries.  */
      if (!using_thumb2 (htab))
	{
	  /* FIXME: We ought to be able to generate thumb-1 PLT
	     instructions...  */
	  _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
			      output_bfd);
	  return false;
	}

      /* Calculate the displacement between the PLT slot and the entry in
	 the GOT.  The 12-byte offset accounts for the value produced by
	 adding to pc in the 3rd instruction of the PLT stub.  */
      got_displacement = got_address - (plt_address + 12);

      /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	 instead of 'put_thumb_insn'.  The shifts below scatter the
	 displacement into the Thumb-2 movw/movt immediate fields.  */
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[0]
		    | ((got_displacement & 0x000000ff) << 16)
		    | ((got_displacement & 0x00000700) << 20)
		    | ((got_displacement & 0x00000800) >> 1)
		    | ((got_displacement & 0x0000f000) >> 12),
		    ptr + 0);
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[1]
		    | ((got_displacement & 0x00ff0000) )
		    | ((got_displacement & 0x07000000) << 4)
		    | ((got_displacement & 0x08000000) >> 17)
		    | ((got_displacement & 0xf0000000) >> 28),
		    ptr + 4);
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[2],
		    ptr + 8);
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[3],
		    ptr + 12);
    }
  else
    {
      /* Calculate the displacement between the PLT slot and the
	 entry in the GOT.  The eight-byte offset accounts for the
	 value produced by adding to pc in the first instruction
	 of the PLT stub.  */
      got_displacement = got_address - (plt_address + 8);

      if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	{
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[0], ptr - 4);
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[1], ptr - 2);
	}

      if (!elf32_arm_use_long_plt_entry)
	{
	  /* The short entry splits a 28-bit displacement across three
	     rotated add/ldr immediates.  */
	  BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_short[0]
			| ((got_displacement & 0x0ff00000) >> 20),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_short[1]
			| ((got_displacement & 0x000ff000) >> 12),
			ptr+ 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_short[2]
			| (got_displacement & 0x00000fff),
			ptr + 8);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	}
      else
	{
	  /* The long entry covers a full 32-bit displacement in four
	     instructions.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[0]
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[1]
			| ((got_displacement & 0x0ff00000) >> 20),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[2]
			| ((got_displacement & 0x000ff000) >> 12),
			ptr+ 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[3]
			| (got_displacement & 0x00000fff),
			ptr + 12);
	}
    }

  /* Fill in the entry in the .rel(a).(i)plt section.  */
  rel.r_offset = got_address;
  rel.r_addend = 0;
  if (dynindx == -1)
    {
      /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	 The dynamic linker or static executable then calls SYM_VALUE
	 to determine the correct run-time value of the .igot.plt entry.  */
      rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
      initial_got_entry = sym_value;
    }
  else
    {
      /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
	 used by PLT entry.  */
      if (htab->fdpic_p)
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  initial_got_entry = 0;
	}
      else
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  /* Unresolved GOT entries initially point at the start of
	     .plt so the first call enters the lazy resolver.  */
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);

	  /* PR ld/16017
	     When thumb only we need to set the LSB for any address that
	     will be used with an interworking branch instruction.  */
	  if (using_thumb_only (htab))
	    initial_got_entry |= 1;
	}
    }

  /* Fill in the entry in the global offset table.  */
  bfd_put_32 (output_bfd, initial_got_entry,
	      sgot->contents + got_offset);

  if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
    {
      /* Setup initial funcdesc value.  */
      /* FIXME: we don't support lazy binding because there is a
	 race condition between both words getting written and
	 some other thread attempting to read them.  The ARM
	 architecture does not have an atomic 64 bit load/store
	 instruction that could be used to prevent it; it is
	 recommended that threaded FDPIC applications run with the
	 LD_BIND_NOW environment variable set.  */
      bfd_put_32 (output_bfd, plt_address + 0x18,
		  sgot->contents + got_offset);
      bfd_put_32 (output_bfd, -1 /*TODO*/,
		  sgot->contents + got_offset + 4);
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      if (htab->fdpic_p)
	{
	  /* For FDPIC we put PLT relocations into .rel.got when not
	     lazy binding otherwise we put them in .rel.plt.  For now,
	     we don't support lazy binding so put it in .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelgot, &rel);
	  else
	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelplt, &rel);
	}
      else
	{
	  /* Non-FDPIC: .rel.plt slots are indexed by plt_index, so
	     write directly rather than appending.  */
	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
    }

  return true;
}
  8594. /* Some relocations map to different relocations depending on the
  8595. target. Return the real relocation. */
  8596. static int
  8597. arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
  8598. int r_type)
  8599. {
  8600. switch (r_type)
  8601. {
  8602. case R_ARM_TARGET1:
  8603. if (globals->target1_is_rel)
  8604. return R_ARM_REL32;
  8605. else
  8606. return R_ARM_ABS32;
  8607. case R_ARM_TARGET2:
  8608. return globals->target2_reloc;
  8609. default:
  8610. return r_type;
  8611. }
  8612. }
  8613. /* Return the base VMA address which should be subtracted from real addresses
  8614. when resolving @dtpoff relocation.
  8615. This is PT_TLS segment p_vaddr. */
  8616. static bfd_vma
  8617. dtpoff_base (struct bfd_link_info *info)
  8618. {
  8619. /* If tls_sec is NULL, we should have signalled an error already. */
  8620. if (elf_hash_table (info)->tls_sec == NULL)
  8621. return 0;
  8622. return elf_hash_table (info)->tls_sec->vma;
  8623. }
  8624. /* Return the relocation value for @tpoff relocation
  8625. if STT_TLS virtual address is ADDRESS. */
  8626. static bfd_vma
  8627. tpoff (struct bfd_link_info *info, bfd_vma address)
  8628. {
  8629. struct elf_link_hash_table *htab = elf_hash_table (info);
  8630. bfd_vma base;
  8631. /* If tls_sec is NULL, we should have signalled an error already. */
  8632. if (htab->tls_sec == NULL)
  8633. return 0;
  8634. base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
  8635. return address - htab->tls_sec->vma + base;
  8636. }
  8637. /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
  8638. VALUE is the relocation value. */
  8639. static bfd_reloc_status_type
  8640. elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
  8641. {
  8642. if (value > 0xfff)
  8643. return bfd_reloc_overflow;
  8644. value |= bfd_get_32 (abfd, data) & 0xfffff000;
  8645. bfd_put_32 (abfd, value, data);
  8646. return bfd_reloc_ok;
  8647. }
/* Handle TLS relaxations.  Relaxing is possible for symbols that use
   R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
   R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.

   FIXME:When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.  */

static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  /* Low bit distinguishes a Thumb (5) from an ARM (8) pc bias
	     already baked into the stored value.  */
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* Caller must still run final_link_relocate on this field.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Rewrite each instruction of the TLS descriptor
	 call sequence according to which one it is.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "Thumb", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same rewriting as above, for the ARM encoding of
	 the descriptor sequence.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000)  /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004)  /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "ARM", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      /* Write as two 16-bit halfwords, high half first.  */
      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
/* For a given value of n, calculate the value of G_n as required to
   deal with group relocations.  We return it in the form of an
   encoded constant-and-rotation, together with the final residual.  If n is
   specified as less than zero, then final_residual is filled with the
   input value and no further action is performed.  */

static bfd_vma
calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
{
  int current_n;
  bfd_vma g_n;
  bfd_vma encoded_g_n = 0;
  bfd_vma residual = value; /* Also known as Y_n.  */

  for (current_n = 0; current_n <= n; current_n++)
    {
      int shift;

      /* Calculate which part of the value to mask.  */
      if (residual == 0)
	shift = 0;
      else
	{
	  int msb;

	  /* Determine the most significant bit in the residual and
	     align the resulting value to a 2-bit boundary.  */
	  for (msb = 30; msb >= 0; msb -= 2)
	    if (residual & (3u << msb))
	      break;

	  /* The desired shift is now (msb - 6), or zero, whichever
	     is the greater.  */
	  shift = msb - 6;
	  if (shift < 0)
	    shift = 0;
	}

      /* Calculate g_n in 32-bit as well as encoded constant+rotation form.
	 The encoding is an 8-bit constant in bits 0-7 and a 4-bit
	 rotate-right count (in units of 2 bits) in bits 8-11, matching
	 the ARM data-processing immediate format.  */
      g_n = residual & (0xff << shift);
      encoded_g_n = (g_n >> shift)
	| ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);

      /* Calculate the residual for the next time around.  */
      residual &= ~g_n;
    }

  *final_residual = residual;

  return encoded_g_n;
}
  8830. /* Given an ARM instruction, determine whether it is an ADD or a SUB.
  8831. Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
  8832. static int
  8833. identify_add_or_sub (bfd_vma insn)
  8834. {
  8835. int opcode = insn & 0x1e00000;
  8836. if (opcode == 1 << 23) /* ADD */
  8837. return 1;
  8838. if (opcode == 1 << 22) /* SUB */
  8839. return -1;
  8840. return 0;
  8841. }
  8842. /* Perform a relocation as part of a final link. */
  8843. static bfd_reloc_status_type
  8844. elf32_arm_final_link_relocate (reloc_howto_type * howto,
  8845. bfd * input_bfd,
  8846. bfd * output_bfd,
  8847. asection * input_section,
  8848. bfd_byte * contents,
  8849. Elf_Internal_Rela * rel,
  8850. bfd_vma value,
  8851. struct bfd_link_info * info,
  8852. asection * sym_sec,
  8853. const char * sym_name,
  8854. unsigned char st_type,
  8855. enum arm_st_branch_type branch_type,
  8856. struct elf_link_hash_entry * h,
  8857. bool * unresolved_reloc_p,
  8858. char ** error_message)
  8859. {
  8860. unsigned long r_type = howto->type;
  8861. unsigned long r_symndx;
  8862. bfd_byte * hit_data = contents + rel->r_offset;
  8863. bfd_vma * local_got_offsets;
  8864. bfd_vma * local_tlsdesc_gotents;
  8865. asection * sgot;
  8866. asection * splt;
  8867. asection * sreloc = NULL;
  8868. asection * srelgot;
  8869. bfd_vma addend;
  8870. bfd_signed_vma signed_addend;
  8871. unsigned char dynreloc_st_type;
  8872. bfd_vma dynreloc_value;
  8873. struct elf32_arm_link_hash_table * globals;
  8874. struct elf32_arm_link_hash_entry *eh;
  8875. union gotplt_union *root_plt;
  8876. struct arm_plt_info *arm_plt;
  8877. bfd_vma plt_offset;
  8878. bfd_vma gotplt_offset;
  8879. bool has_iplt_entry;
  8880. bool resolved_to_zero;
  8881. globals = elf32_arm_hash_table (info);
  8882. if (globals == NULL)
  8883. return bfd_reloc_notsupported;
  8884. BFD_ASSERT (is_arm_elf (input_bfd));
  8885. BFD_ASSERT (howto != NULL);
  8886. /* Some relocation types map to different relocations depending on the
  8887. target. We pick the right one here. */
  8888. r_type = arm_real_reloc_type (globals, r_type);
  8889. /* It is possible to have linker relaxations on some TLS access
  8890. models. Update our information here. */
  8891. r_type = elf32_arm_tls_transition (info, r_type, h);
  8892. if (r_type != howto->type)
  8893. howto = elf32_arm_howto_from_type (r_type);
  8894. eh = (struct elf32_arm_link_hash_entry *) h;
  8895. sgot = globals->root.sgot;
  8896. local_got_offsets = elf_local_got_offsets (input_bfd);
  8897. local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
  8898. if (globals->root.dynamic_sections_created)
  8899. srelgot = globals->root.srelgot;
  8900. else
  8901. srelgot = NULL;
  8902. r_symndx = ELF32_R_SYM (rel->r_info);
  8903. if (globals->use_rel)
  8904. {
  8905. bfd_vma sign;
  8906. switch (howto->size)
  8907. {
  8908. case 0: addend = bfd_get_8 (input_bfd, hit_data); break;
  8909. case 1: addend = bfd_get_16 (input_bfd, hit_data); break;
  8910. case 2: addend = bfd_get_32 (input_bfd, hit_data); break;
  8911. default: addend = 0; break;
  8912. }
  8913. /* Note: the addend and signed_addend calculated here are
  8914. incorrect for any split field. */
  8915. addend &= howto->src_mask;
  8916. sign = howto->src_mask & ~(howto->src_mask >> 1);
  8917. signed_addend = (addend ^ sign) - sign;
  8918. signed_addend = (bfd_vma) signed_addend << howto->rightshift;
  8919. addend <<= howto->rightshift;
  8920. }
  8921. else
  8922. addend = signed_addend = rel->r_addend;
  8923. /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
  8924. are resolving a function call relocation. */
  8925. if (using_thumb_only (globals)
  8926. && (r_type == R_ARM_THM_CALL
  8927. || r_type == R_ARM_THM_JUMP24)
  8928. && branch_type == ST_BRANCH_TO_ARM)
  8929. branch_type = ST_BRANCH_TO_THUMB;
  8930. /* Record the symbol information that should be used in dynamic
  8931. relocations. */
  8932. dynreloc_st_type = st_type;
  8933. dynreloc_value = value;
  8934. if (branch_type == ST_BRANCH_TO_THUMB)
  8935. dynreloc_value |= 1;
  8936. /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
  8937. VALUE appropriately for relocations that we resolve at link time. */
  8938. has_iplt_entry = false;
  8939. if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
  8940. &arm_plt)
  8941. && root_plt->offset != (bfd_vma) -1)
  8942. {
  8943. plt_offset = root_plt->offset;
  8944. gotplt_offset = arm_plt->got_offset;
  8945. if (h == NULL || eh->is_iplt)
  8946. {
  8947. has_iplt_entry = true;
  8948. splt = globals->root.iplt;
  8949. /* Populate .iplt entries here, because not all of them will
  8950. be seen by finish_dynamic_symbol. The lower bit is set if
  8951. we have already populated the entry. */
  8952. if (plt_offset & 1)
  8953. plt_offset--;
  8954. else
  8955. {
  8956. if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
  8957. -1, dynreloc_value))
  8958. root_plt->offset |= 1;
  8959. else
  8960. return bfd_reloc_notsupported;
  8961. }
  8962. /* Static relocations always resolve to the .iplt entry. */
  8963. st_type = STT_FUNC;
  8964. value = (splt->output_section->vma
  8965. + splt->output_offset
  8966. + plt_offset);
  8967. branch_type = ST_BRANCH_TO_ARM;
  8968. /* If there are non-call relocations that resolve to the .iplt
  8969. entry, then all dynamic ones must too. */
  8970. if (arm_plt->noncall_refcount != 0)
  8971. {
  8972. dynreloc_st_type = st_type;
  8973. dynreloc_value = value;
  8974. }
  8975. }
  8976. else
  8977. /* We populate the .plt entry in finish_dynamic_symbol. */
  8978. splt = globals->root.splt;
  8979. }
  8980. else
  8981. {
  8982. splt = NULL;
  8983. plt_offset = (bfd_vma) -1;
  8984. gotplt_offset = (bfd_vma) -1;
  8985. }
  8986. resolved_to_zero = (h != NULL
  8987. && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
  8988. switch (r_type)
  8989. {
  8990. case R_ARM_NONE:
  8991. /* We don't need to find a value for this symbol. It's just a
  8992. marker. */
  8993. *unresolved_reloc_p = false;
  8994. return bfd_reloc_ok;
  8995. case R_ARM_ABS12:
  8996. if (globals->root.target_os != is_vxworks)
  8997. return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
  8998. /* Fall through. */
  8999. case R_ARM_PC24:
  9000. case R_ARM_ABS32:
  9001. case R_ARM_ABS32_NOI:
  9002. case R_ARM_REL32:
  9003. case R_ARM_REL32_NOI:
  9004. case R_ARM_CALL:
  9005. case R_ARM_JUMP24:
  9006. case R_ARM_XPC25:
  9007. case R_ARM_PREL31:
  9008. case R_ARM_PLT32:
  9009. /* Handle relocations which should use the PLT entry. ABS32/REL32
  9010. will use the symbol's value, which may point to a PLT entry, but we
  9011. don't need to handle that here. If we created a PLT entry, all
  9012. branches in this object should go to it, except if the PLT is too
  9013. far away, in which case a long branch stub should be inserted. */
  9014. if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
  9015. && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
  9016. && r_type != R_ARM_CALL
  9017. && r_type != R_ARM_JUMP24
  9018. && r_type != R_ARM_PLT32)
  9019. && plt_offset != (bfd_vma) -1)
  9020. {
  9021. /* If we've created a .plt section, and assigned a PLT entry
  9022. to this function, it must either be a STT_GNU_IFUNC reference
  9023. or not be known to bind locally. In other cases, we should
  9024. have cleared the PLT entry by now. */
  9025. BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
  9026. value = (splt->output_section->vma
  9027. + splt->output_offset
  9028. + plt_offset);
  9029. *unresolved_reloc_p = false;
  9030. return _bfd_final_link_relocate (howto, input_bfd, input_section,
  9031. contents, rel->r_offset, value,
  9032. rel->r_addend);
  9033. }
  9034. /* When generating a shared object or relocatable executable, these
  9035. relocations are copied into the output file to be resolved at
  9036. run time. */
  9037. if ((bfd_link_pic (info)
  9038. || globals->root.is_relocatable_executable
  9039. || globals->fdpic_p)
  9040. && (input_section->flags & SEC_ALLOC)
  9041. && !(globals->root.target_os == is_vxworks
  9042. && strcmp (input_section->output_section->name,
  9043. ".tls_vars") == 0)
  9044. && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
  9045. || !SYMBOL_CALLS_LOCAL (info, h))
  9046. && !(input_bfd == globals->stub_bfd
  9047. && strstr (input_section->name, STUB_SUFFIX))
  9048. && (h == NULL
  9049. || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
  9050. && !resolved_to_zero)
  9051. || h->root.type != bfd_link_hash_undefweak)
  9052. && r_type != R_ARM_PC24
  9053. && r_type != R_ARM_CALL
  9054. && r_type != R_ARM_JUMP24
  9055. && r_type != R_ARM_PREL31
  9056. && r_type != R_ARM_PLT32)
  9057. {
  9058. Elf_Internal_Rela outrel;
  9059. bool skip, relocate;
  9060. int isrofixup = 0;
  9061. if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
  9062. && !h->def_regular)
  9063. {
  9064. char *v = _("shared object");
  9065. if (bfd_link_executable (info))
  9066. v = _("PIE executable");
  9067. _bfd_error_handler
  9068. (_("%pB: relocation %s against external or undefined symbol `%s'"
  9069. " can not be used when making a %s; recompile with -fPIC"), input_bfd,
  9070. elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
  9071. return bfd_reloc_notsupported;
  9072. }
  9073. *unresolved_reloc_p = false;
  9074. if (sreloc == NULL && globals->root.dynamic_sections_created)
  9075. {
  9076. sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
  9077. ! globals->use_rel);
  9078. if (sreloc == NULL)
  9079. return bfd_reloc_notsupported;
  9080. }
  9081. skip = false;
  9082. relocate = false;
  9083. outrel.r_addend = addend;
  9084. outrel.r_offset =
  9085. _bfd_elf_section_offset (output_bfd, info, input_section,
  9086. rel->r_offset);
  9087. if (outrel.r_offset == (bfd_vma) -1)
  9088. skip = true;
  9089. else if (outrel.r_offset == (bfd_vma) -2)
  9090. skip = true, relocate = true;
  9091. outrel.r_offset += (input_section->output_section->vma
  9092. + input_section->output_offset);
  9093. if (skip)
  9094. memset (&outrel, 0, sizeof outrel);
  9095. else if (h != NULL
  9096. && h->dynindx != -1
  9097. && (!bfd_link_pic (info)
  9098. || !(bfd_link_pie (info)
  9099. || SYMBOLIC_BIND (info, h))
  9100. || !h->def_regular))
  9101. outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
  9102. else
  9103. {
  9104. int symbol;
  9105. /* This symbol is local, or marked to become local. */
  9106. BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
  9107. || (globals->fdpic_p && !bfd_link_pic (info)));
  9108. /* On SVR4-ish systems, the dynamic loader cannot
  9109. relocate the text and data segments independently,
  9110. so the symbol does not matter. */
  9111. symbol = 0;
  9112. if (dynreloc_st_type == STT_GNU_IFUNC)
  9113. /* We have an STT_GNU_IFUNC symbol that doesn't resolve
  9114. to the .iplt entry. Instead, every non-call reference
  9115. must use an R_ARM_IRELATIVE relocation to obtain the
  9116. correct run-time address. */
  9117. outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
  9118. else if (globals->fdpic_p && !bfd_link_pic (info))
  9119. isrofixup = 1;
  9120. else
  9121. outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
  9122. if (globals->use_rel)
  9123. relocate = true;
  9124. else
  9125. outrel.r_addend += dynreloc_value;
  9126. }
  9127. if (isrofixup)
  9128. arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
  9129. else
  9130. elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
  9131. /* If this reloc is against an external symbol, we do not want to
  9132. fiddle with the addend. Otherwise, we need to include the symbol
  9133. value so that it becomes an addend for the dynamic reloc. */
  9134. if (! relocate)
  9135. return bfd_reloc_ok;
  9136. return _bfd_final_link_relocate (howto, input_bfd, input_section,
  9137. contents, rel->r_offset,
  9138. dynreloc_value, (bfd_vma) 0);
  9139. }
  9140. else switch (r_type)
  9141. {
  9142. case R_ARM_ABS12:
  9143. return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
  9144. case R_ARM_XPC25: /* Arm BLX instruction. */
  9145. case R_ARM_CALL:
  9146. case R_ARM_JUMP24:
  9147. case R_ARM_PC24: /* Arm B/BL instruction. */
  9148. case R_ARM_PLT32:
  9149. {
  9150. struct elf32_arm_stub_hash_entry *stub_entry = NULL;
  9151. if (r_type == R_ARM_XPC25)
  9152. {
  9153. /* Check for Arm calling Arm function. */
  9154. /* FIXME: Should we translate the instruction into a BL
  9155. instruction instead ? */
  9156. if (branch_type != ST_BRANCH_TO_THUMB)
  9157. _bfd_error_handler
  9158. (_("\%pB: warning: %s BLX instruction targets"
  9159. " %s function '%s'"),
  9160. input_bfd, "ARM",
  9161. "ARM", h ? h->root.root.string : "(local)");
  9162. }
  9163. else if (r_type == R_ARM_PC24)
  9164. {
  9165. /* Check for Arm calling Thumb function. */
  9166. if (branch_type == ST_BRANCH_TO_THUMB)
  9167. {
  9168. if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
  9169. output_bfd, input_section,
  9170. hit_data, sym_sec, rel->r_offset,
  9171. signed_addend, value,
  9172. error_message))
  9173. return bfd_reloc_ok;
  9174. else
  9175. return bfd_reloc_dangerous;
  9176. }
  9177. }
  9178. /* Check if a stub has to be inserted because the
  9179. destination is too far or we are changing mode. */
  9180. if ( r_type == R_ARM_CALL
  9181. || r_type == R_ARM_JUMP24
  9182. || r_type == R_ARM_PLT32)
  9183. {
  9184. enum elf32_arm_stub_type stub_type = arm_stub_none;
  9185. struct elf32_arm_link_hash_entry *hash;
  9186. hash = (struct elf32_arm_link_hash_entry *) h;
  9187. stub_type = arm_type_of_stub (info, input_section, rel,
  9188. st_type, &branch_type,
  9189. hash, value, sym_sec,
  9190. input_bfd, sym_name);
  9191. if (stub_type != arm_stub_none)
  9192. {
  9193. /* The target is out of reach, so redirect the
  9194. branch to the local stub for this function. */
  9195. stub_entry = elf32_arm_get_stub_entry (input_section,
  9196. sym_sec, h,
  9197. rel, globals,
  9198. stub_type);
  9199. {
  9200. if (stub_entry != NULL)
  9201. value = (stub_entry->stub_offset
  9202. + stub_entry->stub_sec->output_offset
  9203. + stub_entry->stub_sec->output_section->vma);
  9204. if (plt_offset != (bfd_vma) -1)
  9205. *unresolved_reloc_p = false;
  9206. }
  9207. }
  9208. else
  9209. {
  9210. /* If the call goes through a PLT entry, make sure to
  9211. check distance to the right destination address. */
  9212. if (plt_offset != (bfd_vma) -1)
  9213. {
  9214. value = (splt->output_section->vma
  9215. + splt->output_offset
  9216. + plt_offset);
  9217. *unresolved_reloc_p = false;
  9218. /* The PLT entry is in ARM mode, regardless of the
  9219. target function. */
  9220. branch_type = ST_BRANCH_TO_ARM;
  9221. }
  9222. }
  9223. }
  9224. /* The ARM ELF ABI says that this reloc is computed as: S - P + A
  9225. where:
  9226. S is the address of the symbol in the relocation.
  9227. P is address of the instruction being relocated.
  9228. A is the addend (extracted from the instruction) in bytes.
  9229. S is held in 'value'.
  9230. P is the base address of the section containing the
  9231. instruction plus the offset of the reloc into that
  9232. section, ie:
  9233. (input_section->output_section->vma +
  9234. input_section->output_offset +
  9235. rel->r_offset).
  9236. A is the addend, converted into bytes, ie:
  9237. (signed_addend * 4)
  9238. Note: None of these operations have knowledge of the pipeline
  9239. size of the processor, thus it is up to the assembler to
  9240. encode this information into the addend. */
  9241. value -= (input_section->output_section->vma
  9242. + input_section->output_offset);
  9243. value -= rel->r_offset;
  9244. value += signed_addend;
  9245. signed_addend = value;
  9246. signed_addend >>= howto->rightshift;
  9247. /* A branch to an undefined weak symbol is turned into a jump to
  9248. the next instruction unless a PLT entry will be created.
  9249. Do the same for local undefined symbols (but not for STN_UNDEF).
  9250. The jump to the next instruction is optimized as a NOP depending
  9251. on the architecture. */
  9252. if (h ? (h->root.type == bfd_link_hash_undefweak
  9253. && plt_offset == (bfd_vma) -1)
  9254. : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
  9255. {
  9256. value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
  9257. if (arch_has_arm_nop (globals))
  9258. value |= 0x0320f000;
  9259. else
  9260. value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
  9261. }
  9262. else
  9263. {
  9264. /* Perform a signed range check. */
  9265. if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
  9266. || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
  9267. return bfd_reloc_overflow;
  9268. addend = (value & 2);
  9269. value = (signed_addend & howto->dst_mask)
  9270. | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
  9271. if (r_type == R_ARM_CALL)
  9272. {
  9273. /* Set the H bit in the BLX instruction. */
  9274. if (branch_type == ST_BRANCH_TO_THUMB)
  9275. {
  9276. if (addend)
  9277. value |= (1 << 24);
  9278. else
  9279. value &= ~(bfd_vma)(1 << 24);
  9280. }
  9281. /* Select the correct instruction (BL or BLX). */
  9282. /* Only if we are not handling a BL to a stub. In this
  9283. case, mode switching is performed by the stub. */
  9284. if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
  9285. value |= (1 << 28);
  9286. else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
  9287. {
  9288. value &= ~(bfd_vma)(1 << 28);
  9289. value |= (1 << 24);
  9290. }
  9291. }
  9292. }
  9293. }
  9294. break;
  9295. case R_ARM_ABS32:
  9296. value += addend;
  9297. if (branch_type == ST_BRANCH_TO_THUMB)
  9298. value |= 1;
  9299. break;
  9300. case R_ARM_ABS32_NOI:
  9301. value += addend;
  9302. break;
  9303. case R_ARM_REL32:
  9304. value += addend;
  9305. if (branch_type == ST_BRANCH_TO_THUMB)
  9306. value |= 1;
  9307. value -= (input_section->output_section->vma
  9308. + input_section->output_offset + rel->r_offset);
  9309. break;
  9310. case R_ARM_REL32_NOI:
  9311. value += addend;
  9312. value -= (input_section->output_section->vma
  9313. + input_section->output_offset + rel->r_offset);
  9314. break;
  9315. case R_ARM_PREL31:
  9316. value -= (input_section->output_section->vma
  9317. + input_section->output_offset + rel->r_offset);
  9318. value += signed_addend;
  9319. if (! h || h->root.type != bfd_link_hash_undefweak)
  9320. {
  9321. /* Check for overflow. */
  9322. if ((value ^ (value >> 1)) & (1 << 30))
  9323. return bfd_reloc_overflow;
  9324. }
  9325. value &= 0x7fffffff;
  9326. value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
  9327. if (branch_type == ST_BRANCH_TO_THUMB)
  9328. value |= 1;
  9329. break;
  9330. }
  9331. bfd_put_32 (input_bfd, value, hit_data);
  9332. return bfd_reloc_ok;
  9333. case R_ARM_ABS8:
  9334. value += addend;
  9335. /* There is no way to tell whether the user intended to use a signed or
  9336. unsigned addend. When checking for overflow we accept either,
  9337. as specified by the AAELF. */
  9338. if ((long) value > 0xff || (long) value < -0x80)
  9339. return bfd_reloc_overflow;
  9340. bfd_put_8 (input_bfd, value, hit_data);
  9341. return bfd_reloc_ok;
  9342. case R_ARM_ABS16:
  9343. value += addend;
  9344. /* See comment for R_ARM_ABS8. */
  9345. if ((long) value > 0xffff || (long) value < -0x8000)
  9346. return bfd_reloc_overflow;
  9347. bfd_put_16 (input_bfd, value, hit_data);
  9348. return bfd_reloc_ok;
  9349. case R_ARM_THM_ABS5:
  9350. /* Support ldr and str instructions for the thumb. */
  9351. if (globals->use_rel)
  9352. {
  9353. /* Need to refetch addend. */
  9354. addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
  9355. /* ??? Need to determine shift amount from operand size. */
  9356. addend >>= howto->rightshift;
  9357. }
  9358. value += addend;
  9359. /* ??? Isn't value unsigned? */
  9360. if ((long) value > 0x1f || (long) value < -0x10)
  9361. return bfd_reloc_overflow;
  9362. /* ??? Value needs to be properly shifted into place first. */
  9363. value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
  9364. bfd_put_16 (input_bfd, value, hit_data);
  9365. return bfd_reloc_ok;
  9366. case R_ARM_THM_ALU_PREL_11_0:
  9367. /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
  9368. {
  9369. bfd_vma insn;
  9370. bfd_signed_vma relocation;
  9371. insn = (bfd_get_16 (input_bfd, hit_data) << 16)
  9372. | bfd_get_16 (input_bfd, hit_data + 2);
  9373. if (globals->use_rel)
  9374. {
  9375. signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
  9376. | ((insn & (1 << 26)) >> 15);
  9377. if (insn & 0xf00000)
  9378. signed_addend = -signed_addend;
  9379. }
  9380. relocation = value + signed_addend;
  9381. relocation -= Pa (input_section->output_section->vma
  9382. + input_section->output_offset
  9383. + rel->r_offset);
  9384. /* PR 21523: Use an absolute value. The user of this reloc will
  9385. have already selected an ADD or SUB insn appropriately. */
  9386. value = llabs (relocation);
  9387. if (value >= 0x1000)
  9388. return bfd_reloc_overflow;
  9389. /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
  9390. if (branch_type == ST_BRANCH_TO_THUMB)
  9391. value |= 1;
  9392. insn = (insn & 0xfb0f8f00) | (value & 0xff)
  9393. | ((value & 0x700) << 4)
  9394. | ((value & 0x800) << 15);
  9395. if (relocation < 0)
  9396. insn |= 0xa00000;
  9397. bfd_put_16 (input_bfd, insn >> 16, hit_data);
  9398. bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
  9399. return bfd_reloc_ok;
  9400. }
  9401. case R_ARM_THM_PC8:
  9402. /* PR 10073: This reloc is not generated by the GNU toolchain,
  9403. but it is supported for compatibility with third party libraries
  9404. generated by other compilers, specifically the ARM/IAR. */
  9405. {
  9406. bfd_vma insn;
  9407. bfd_signed_vma relocation;
  9408. insn = bfd_get_16 (input_bfd, hit_data);
  9409. if (globals->use_rel)
  9410. addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
  9411. relocation = value + addend;
  9412. relocation -= Pa (input_section->output_section->vma
  9413. + input_section->output_offset
  9414. + rel->r_offset);
  9415. value = relocation;
  9416. /* We do not check for overflow of this reloc. Although strictly
  9417. speaking this is incorrect, it appears to be necessary in order
  9418. to work with IAR generated relocs. Since GCC and GAS do not
  9419. generate R_ARM_THM_PC8 relocs, the lack of a check should not be
  9420. a problem for them. */
  9421. value &= 0x3fc;
  9422. insn = (insn & 0xff00) | (value >> 2);
  9423. bfd_put_16 (input_bfd, insn, hit_data);
  9424. return bfd_reloc_ok;
  9425. }
  9426. case R_ARM_THM_PC12:
  9427. /* Corresponds to: ldr.w reg, [pc, #offset]. */
  9428. {
  9429. bfd_vma insn;
  9430. bfd_signed_vma relocation;
  9431. insn = (bfd_get_16 (input_bfd, hit_data) << 16)
  9432. | bfd_get_16 (input_bfd, hit_data + 2);
  9433. if (globals->use_rel)
  9434. {
  9435. signed_addend = insn & 0xfff;
  9436. if (!(insn & (1 << 23)))
  9437. signed_addend = -signed_addend;
  9438. }
  9439. relocation = value + signed_addend;
  9440. relocation -= Pa (input_section->output_section->vma
  9441. + input_section->output_offset
  9442. + rel->r_offset);
  9443. value = relocation;
  9444. if (value >= 0x1000)
  9445. return bfd_reloc_overflow;
  9446. insn = (insn & 0xff7ff000) | value;
  9447. if (relocation >= 0)
  9448. insn |= (1 << 23);
  9449. bfd_put_16 (input_bfd, insn >> 16, hit_data);
  9450. bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
  9451. return bfd_reloc_ok;
  9452. }
  9453. case R_ARM_THM_XPC22:
  9454. case R_ARM_THM_CALL:
  9455. case R_ARM_THM_JUMP24:
  9456. /* Thumb BL (branch long instruction). */
  9457. {
  9458. bfd_vma relocation;
  9459. bfd_vma reloc_sign;
  9460. bool overflow = false;
  9461. bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
  9462. bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
  9463. bfd_signed_vma reloc_signed_max;
  9464. bfd_signed_vma reloc_signed_min;
  9465. bfd_vma check;
  9466. bfd_signed_vma signed_check;
  9467. int bitsize;
  9468. const int thumb2 = using_thumb2 (globals);
  9469. const int thumb2_bl = using_thumb2_bl (globals);
  9470. /* A branch to an undefined weak symbol is turned into a jump to
  9471. the next instruction unless a PLT entry will be created.
  9472. The jump to the next instruction is optimized as a NOP.W for
  9473. Thumb-2 enabled architectures. */
  9474. if (h && h->root.type == bfd_link_hash_undefweak
  9475. && plt_offset == (bfd_vma) -1)
  9476. {
  9477. if (thumb2)
  9478. {
  9479. bfd_put_16 (input_bfd, 0xf3af, hit_data);
  9480. bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
  9481. }
  9482. else
  9483. {
  9484. bfd_put_16 (input_bfd, 0xe000, hit_data);
  9485. bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
  9486. }
  9487. return bfd_reloc_ok;
  9488. }
  9489. /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
  9490. with Thumb-1) involving the J1 and J2 bits. */
  9491. if (globals->use_rel)
  9492. {
  9493. bfd_vma s = (upper_insn & (1 << 10)) >> 10;
  9494. bfd_vma upper = upper_insn & 0x3ff;
  9495. bfd_vma lower = lower_insn & 0x7ff;
  9496. bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
  9497. bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
  9498. bfd_vma i1 = j1 ^ s ? 0 : 1;
  9499. bfd_vma i2 = j2 ^ s ? 0 : 1;
  9500. addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
  9501. /* Sign extend. */
  9502. addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
  9503. signed_addend = addend;
  9504. }
  9505. if (r_type == R_ARM_THM_XPC22)
  9506. {
  9507. /* Check for Thumb to Thumb call. */
  9508. /* FIXME: Should we translate the instruction into a BL
  9509. instruction instead ? */
  9510. if (branch_type == ST_BRANCH_TO_THUMB)
  9511. _bfd_error_handler
  9512. (_("%pB: warning: %s BLX instruction targets"
  9513. " %s function '%s'"),
  9514. input_bfd, "Thumb",
  9515. "Thumb", h ? h->root.root.string : "(local)");
  9516. }
  9517. else
  9518. {
  9519. /* If it is not a call to Thumb, assume call to Arm.
  9520. If it is a call relative to a section name, then it is not a
  9521. function call at all, but rather a long jump. Calls through
  9522. the PLT do not require stubs. */
  9523. if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
  9524. {
  9525. if (globals->use_blx && r_type == R_ARM_THM_CALL)
  9526. {
  9527. /* Convert BL to BLX. */
  9528. lower_insn = (lower_insn & ~0x1000) | 0x0800;
  9529. }
  9530. else if (( r_type != R_ARM_THM_CALL)
  9531. && (r_type != R_ARM_THM_JUMP24))
  9532. {
  9533. if (elf32_thumb_to_arm_stub
  9534. (info, sym_name, input_bfd, output_bfd, input_section,
  9535. hit_data, sym_sec, rel->r_offset, signed_addend, value,
  9536. error_message))
  9537. return bfd_reloc_ok;
  9538. else
  9539. return bfd_reloc_dangerous;
  9540. }
  9541. }
  9542. else if (branch_type == ST_BRANCH_TO_THUMB
  9543. && globals->use_blx
  9544. && r_type == R_ARM_THM_CALL)
  9545. {
  9546. /* Make sure this is a BL. */
  9547. lower_insn |= 0x1800;
  9548. }
  9549. }
  9550. enum elf32_arm_stub_type stub_type = arm_stub_none;
  9551. if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
  9552. {
  9553. /* Check if a stub has to be inserted because the destination
  9554. is too far. */
  9555. struct elf32_arm_stub_hash_entry *stub_entry;
  9556. struct elf32_arm_link_hash_entry *hash;
  9557. hash = (struct elf32_arm_link_hash_entry *) h;
  9558. stub_type = arm_type_of_stub (info, input_section, rel,
  9559. st_type, &branch_type,
  9560. hash, value, sym_sec,
  9561. input_bfd, sym_name);
  9562. if (stub_type != arm_stub_none)
  9563. {
  9564. /* The target is out of reach or we are changing modes, so
  9565. redirect the branch to the local stub for this
  9566. function. */
  9567. stub_entry = elf32_arm_get_stub_entry (input_section,
  9568. sym_sec, h,
  9569. rel, globals,
  9570. stub_type);
  9571. if (stub_entry != NULL)
  9572. {
  9573. value = (stub_entry->stub_offset
  9574. + stub_entry->stub_sec->output_offset
  9575. + stub_entry->stub_sec->output_section->vma);
  9576. if (plt_offset != (bfd_vma) -1)
  9577. *unresolved_reloc_p = false;
  9578. }
  9579. /* If this call becomes a call to Arm, force BLX. */
  9580. if (globals->use_blx && (r_type == R_ARM_THM_CALL))
  9581. {
  9582. if ((stub_entry
  9583. && !arm_stub_is_thumb (stub_entry->stub_type))
  9584. || branch_type != ST_BRANCH_TO_THUMB)
  9585. lower_insn = (lower_insn & ~0x1000) | 0x0800;
  9586. }
  9587. }
  9588. }
  9589. /* Handle calls via the PLT. */
  9590. if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
  9591. {
  9592. value = (splt->output_section->vma
  9593. + splt->output_offset
  9594. + plt_offset);
  9595. if (globals->use_blx
  9596. && r_type == R_ARM_THM_CALL
  9597. && ! using_thumb_only (globals))
  9598. {
  9599. /* If the Thumb BLX instruction is available, convert
  9600. the BL to a BLX instruction to call the ARM-mode
  9601. PLT entry. */
  9602. lower_insn = (lower_insn & ~0x1000) | 0x0800;
  9603. branch_type = ST_BRANCH_TO_ARM;
  9604. }
  9605. else
  9606. {
  9607. if (! using_thumb_only (globals))
  9608. /* Target the Thumb stub before the ARM PLT entry. */
  9609. value -= PLT_THUMB_STUB_SIZE;
  9610. branch_type = ST_BRANCH_TO_THUMB;
  9611. }
  9612. *unresolved_reloc_p = false;
  9613. }
  9614. relocation = value + signed_addend;
  9615. relocation -= (input_section->output_section->vma
  9616. + input_section->output_offset
  9617. + rel->r_offset);
  9618. check = relocation >> howto->rightshift;
  9619. /* If this is a signed value, the rightshift just dropped
  9620. leading 1 bits (assuming twos complement). */
  9621. if ((bfd_signed_vma) relocation >= 0)
  9622. signed_check = check;
  9623. else
  9624. signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
  9625. /* Calculate the permissable maximum and minimum values for
  9626. this relocation according to whether we're relocating for
  9627. Thumb-2 or not. */
  9628. bitsize = howto->bitsize;
  9629. if (!thumb2_bl)
  9630. bitsize -= 2;
  9631. reloc_signed_max = (1 << (bitsize - 1)) - 1;
  9632. reloc_signed_min = ~reloc_signed_max;
  9633. /* Assumes two's complement. */
  9634. if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
  9635. overflow = true;
  9636. if ((lower_insn & 0x5000) == 0x4000)
  9637. /* For a BLX instruction, make sure that the relocation is rounded up
  9638. to a word boundary. This follows the semantics of the instruction
  9639. which specifies that bit 1 of the target address will come from bit
  9640. 1 of the base address. */
  9641. relocation = (relocation + 2) & ~ 3;
  9642. /* Put RELOCATION back into the insn. Assumes two's complement.
  9643. We use the Thumb-2 encoding, which is safe even if dealing with
  9644. a Thumb-1 instruction by virtue of our overflow check above. */
  9645. reloc_sign = (signed_check < 0) ? 1 : 0;
  9646. upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
  9647. | ((relocation >> 12) & 0x3ff)
  9648. | (reloc_sign << 10);
  9649. lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
  9650. | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
  9651. | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
  9652. | ((relocation >> 1) & 0x7ff);
  9653. /* Put the relocated value back in the object file: */
  9654. bfd_put_16 (input_bfd, upper_insn, hit_data);
  9655. bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
  9656. return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
  9657. }
  9658. break;
  9659. case R_ARM_THM_JUMP19:
  9660. /* Thumb32 conditional branch instruction. */
  9661. {
  9662. bfd_vma relocation;
  9663. bool overflow = false;
  9664. bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
  9665. bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
  9666. bfd_signed_vma reloc_signed_max = 0xffffe;
  9667. bfd_signed_vma reloc_signed_min = -0x100000;
  9668. bfd_signed_vma signed_check;
  9669. enum elf32_arm_stub_type stub_type = arm_stub_none;
  9670. struct elf32_arm_stub_hash_entry *stub_entry;
  9671. struct elf32_arm_link_hash_entry *hash;
  9672. /* Need to refetch the addend, reconstruct the top three bits,
  9673. and squish the two 11 bit pieces together. */
  9674. if (globals->use_rel)
  9675. {
  9676. bfd_vma S = (upper_insn & 0x0400) >> 10;
  9677. bfd_vma upper = (upper_insn & 0x003f);
  9678. bfd_vma J1 = (lower_insn & 0x2000) >> 13;
  9679. bfd_vma J2 = (lower_insn & 0x0800) >> 11;
  9680. bfd_vma lower = (lower_insn & 0x07ff);
  9681. upper |= J1 << 6;
  9682. upper |= J2 << 7;
  9683. upper |= (!S) << 8;
  9684. upper -= 0x0100; /* Sign extend. */
  9685. addend = (upper << 12) | (lower << 1);
  9686. signed_addend = addend;
  9687. }
  9688. /* Handle calls via the PLT. */
  9689. if (plt_offset != (bfd_vma) -1)
  9690. {
  9691. value = (splt->output_section->vma
  9692. + splt->output_offset
  9693. + plt_offset);
  9694. /* Target the Thumb stub before the ARM PLT entry. */
  9695. value -= PLT_THUMB_STUB_SIZE;
  9696. *unresolved_reloc_p = false;
  9697. }
  9698. hash = (struct elf32_arm_link_hash_entry *)h;
  9699. stub_type = arm_type_of_stub (info, input_section, rel,
  9700. st_type, &branch_type,
  9701. hash, value, sym_sec,
  9702. input_bfd, sym_name);
  9703. if (stub_type != arm_stub_none)
  9704. {
  9705. stub_entry = elf32_arm_get_stub_entry (input_section,
  9706. sym_sec, h,
  9707. rel, globals,
  9708. stub_type);
  9709. if (stub_entry != NULL)
  9710. {
  9711. value = (stub_entry->stub_offset
  9712. + stub_entry->stub_sec->output_offset
  9713. + stub_entry->stub_sec->output_section->vma);
  9714. }
  9715. }
  9716. relocation = value + signed_addend;
  9717. relocation -= (input_section->output_section->vma
  9718. + input_section->output_offset
  9719. + rel->r_offset);
  9720. signed_check = (bfd_signed_vma) relocation;
  9721. if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
  9722. overflow = true;
  9723. /* Put RELOCATION back into the insn. */
  9724. {
  9725. bfd_vma S = (relocation & 0x00100000) >> 20;
  9726. bfd_vma J2 = (relocation & 0x00080000) >> 19;
  9727. bfd_vma J1 = (relocation & 0x00040000) >> 18;
  9728. bfd_vma hi = (relocation & 0x0003f000) >> 12;
  9729. bfd_vma lo = (relocation & 0x00000ffe) >> 1;
  9730. upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
  9731. lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
  9732. }
  9733. /* Put the relocated value back in the object file: */
  9734. bfd_put_16 (input_bfd, upper_insn, hit_data);
  9735. bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
  9736. return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
  9737. }
  9738. case R_ARM_THM_JUMP11:
  9739. case R_ARM_THM_JUMP8:
  9740. case R_ARM_THM_JUMP6:
  9741. /* Thumb B (branch) instruction). */
  9742. {
  9743. bfd_signed_vma relocation;
  9744. bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
  9745. bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
  9746. bfd_signed_vma signed_check;
  9747. /* CZB cannot jump backward. */
  9748. if (r_type == R_ARM_THM_JUMP6)
  9749. {
  9750. reloc_signed_min = 0;
  9751. if (globals->use_rel)
  9752. signed_addend = ((addend & 0x200) >> 3) | ((addend & 0xf8) >> 2);
  9753. }
  9754. relocation = value + signed_addend;
  9755. relocation -= (input_section->output_section->vma
  9756. + input_section->output_offset
  9757. + rel->r_offset);
  9758. relocation >>= howto->rightshift;
  9759. signed_check = relocation;
  9760. if (r_type == R_ARM_THM_JUMP6)
  9761. relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
  9762. else
  9763. relocation &= howto->dst_mask;
  9764. relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
  9765. bfd_put_16 (input_bfd, relocation, hit_data);
  9766. /* Assumes two's complement. */
  9767. if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
  9768. return bfd_reloc_overflow;
  9769. return bfd_reloc_ok;
  9770. }
  9771. case R_ARM_ALU_PCREL7_0:
  9772. case R_ARM_ALU_PCREL15_8:
  9773. case R_ARM_ALU_PCREL23_15:
  9774. {
  9775. bfd_vma insn;
  9776. bfd_vma relocation;
  9777. insn = bfd_get_32 (input_bfd, hit_data);
  9778. if (globals->use_rel)
  9779. {
  9780. /* Extract the addend. */
  9781. addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
  9782. signed_addend = addend;
  9783. }
  9784. relocation = value + signed_addend;
  9785. relocation -= (input_section->output_section->vma
  9786. + input_section->output_offset
  9787. + rel->r_offset);
  9788. insn = (insn & ~0xfff)
  9789. | ((howto->bitpos << 7) & 0xf00)
  9790. | ((relocation >> howto->bitpos) & 0xff);
  9791. bfd_put_32 (input_bfd, value, hit_data);
  9792. }
  9793. return bfd_reloc_ok;
  9794. case R_ARM_GNU_VTINHERIT:
  9795. case R_ARM_GNU_VTENTRY:
  9796. return bfd_reloc_ok;
  9797. case R_ARM_GOTOFF32:
  9798. /* Relocation is relative to the start of the
  9799. global offset table. */
  9800. BFD_ASSERT (sgot != NULL);
  9801. if (sgot == NULL)
  9802. return bfd_reloc_notsupported;
  9803. /* If we are addressing a Thumb function, we need to adjust the
  9804. address by one, so that attempts to call the function pointer will
  9805. correctly interpret it as Thumb code. */
  9806. if (branch_type == ST_BRANCH_TO_THUMB)
  9807. value += 1;
  9808. /* Note that sgot->output_offset is not involved in this
  9809. calculation. We always want the start of .got. If we
  9810. define _GLOBAL_OFFSET_TABLE in a different way, as is
  9811. permitted by the ABI, we might have to change this
  9812. calculation. */
  9813. value -= sgot->output_section->vma;
  9814. return _bfd_final_link_relocate (howto, input_bfd, input_section,
  9815. contents, rel->r_offset, value,
  9816. rel->r_addend);
    case R_ARM_GOTPC:
      /* Use global offset table (its output section VMA) as symbol
	 value; the howto then applies the usual PC-relative math.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* The GOT base is known at final link time, so the reloc is
	 fully resolved here.  */
      *unresolved_reloc_p = false;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  On the first reference this also
	 initializes the GOT slot and emits any needed dynamic
	 relocation (or FDPIC rofixup); the low bit of the recorded
	 GOT offset marks "already processed".  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  /* Global symbol: GOT offset is cached on the hash entry.  */
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  if ((off & 1) != 0)
	    {
	      /* We have already processsed one GOT relocation against
		 this symbol.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = false;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (((h->dynindx != -1) || globals->fdpic_p)
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = false;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  /* Symbol binds locally: either an IRELATIVE reloc
		     (ifunc), a RELATIVE reloc (PIC), or no dynamic
		     reloc at all (static; FDPIC uses a rofixup).  */
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info)
			   && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else
		    {
		      outrel.r_info = 0;
		      if (globals->fdpic_p)
			isrofixup = 1;
		    }
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (globals->use_rel || outrel.r_info == 0))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      if (isrofixup)
		arm_elf_add_rofixup (output_bfd,
				     elf32_arm_hash_table (info)->srofixup,
				     sgot->output_section->vma
				     + sgot->output_offset + off);
	      else if (outrel.r_info != 0)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      /* Mark this GOT entry as done.  */
	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  /* Local symbol: GOT offset comes from the per-bfd table.  */
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL
		      && local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (dynreloc_st_type == STT_GNU_IFUNC)
		outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	      else if (bfd_link_pic (info))
		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
	      else
		{
		  outrel.r_info = 0;
		  if (globals->fdpic_p)
		    isrofixup = 1;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (globals->use_rel || outrel.r_info == 0)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      if (isrofixup)
		arm_elf_add_rofixup (output_bfd,
				     globals->srofixup,
				     sgot->output_section->vma
				     + sgot->output_offset + off);
	      else if (outrel.r_info != 0)
		{
		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }
	  value = sgot->output_offset + off;
	}

      /* R_ARM_GOT32 is left relative to the start of .got; the other
	 flavour (R_ARM_GOT_PREL) gets the absolute entry address, with
	 the PC subtraction presumably done by its howto — confirm
	 against the howto table.  */
      if (r_type != R_ARM_GOT32)
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
    case R_ARM_TLS_LDO32:
      /* Resolve to the symbol's offset within its module's TLS block
	 (local-dynamic dtpoff).  */
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
    case R_ARM_TLS_LDM32:
    case R_ARM_TLS_LDM32_FDPIC:
      /* Local-dynamic TLS: resolve to the single shared module-ID GOT
	 slot.  The low bit of tls_ldm_got.offset marks "slot already
	 initialized".  */
      {
	bfd_vma off;

	if (sgot == NULL)
	  abort ();

	off = globals->tls_ldm_got.offset;

	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_dll (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		/* For REL targets the addend lives in the section
		   contents rather than in the dynamic reloc.  */
		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      /* Not a DLL: the module number is statically known to be
		 1, the executable.  */
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	if (r_type == R_ARM_TLS_LDM32_FDPIC)
	  {
	    /* FDPIC: resolve to the GOT-relative offset of the slot.  */
	    bfd_put_32 (output_bfd,
			globals->root.sgot->output_offset + off,
			contents + rel->r_offset);
	    return bfd_reloc_ok;
	  }
	else
	  {
	    /* Otherwise resolve PC-relative to the GOT slot.  */
	    value = sgot->output_section->vma + sgot->output_offset + off
	      - (input_section->output_section->vma
		 + input_section->output_offset + rel->r_offset);

	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
					     contents, rel->r_offset, value,
					     rel->r_addend);
	  }
      }
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_GD32:
    case R_ARM_TLS_GD32_FDPIC:
    case R_ARM_TLS_IE32:
    case R_ARM_TLS_IE32_FDPIC:
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      /* General-dynamic, initial-exec and TLS-descriptor relocations.
	 Initializes the GOT (and .got.plt for descriptors) entries on
	 first use, emits the matching dynamic relocations, and handles
	 the GD->IE/LE relaxation cases including patching the
	 TLS_CALL branch instructions.  */
      {
	bfd_vma off, offplt;
	int indx = 0;
	char tls_type;

	BFD_ASSERT (sgot != NULL);

	if (h != NULL)
	  {
	    /* Global symbol: pick up cached GOT/tlsdesc offsets and
	       whether the symbol needs a dynamic symbol index.  */
	    bool dyn;
	    dyn = globals->root.dynamic_sections_created;
	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
						 bfd_link_pic (info),
						 h)
		&& (!bfd_link_pic (info)
		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
	      {
		*unresolved_reloc_p = false;
		indx = h->dynindx;
	      }
	    off = h->got.offset;
	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
	  }
	else
	  {
	    BFD_ASSERT (local_got_offsets != NULL);

	    /* Bounds-check the local symbol index (guards against
	       corrupt input).  */
	    if (r_symndx >= elf32_arm_num_entries (input_bfd))
	      {
		_bfd_error_handler (_("\
%pB: expected symbol index in range 0..%lu but found local symbol with index %lu"),
				    input_bfd,
				    (unsigned long) elf32_arm_num_entries (input_bfd),
				    r_symndx);
		/* NOTE(review): this function returns
		   bfd_reloc_status_type; 'false' converts to 0, which
		   is presumably bfd_reloc_ok, so the error may be
		   silently swallowed by callers — confirm whether an
		   explicit error status was intended.  */
		return false;
	      }
	    off = local_got_offsets[r_symndx];
	    offplt = local_tlsdesc_gotents[r_symndx];
	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
	  }

	/* Linker relaxations happens from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
	if (ELF32_R_TYPE (rel->r_info) != r_type)
	  tls_type = GOT_TLS_IE;

	BFD_ASSERT (tls_type != GOT_UNKNOWN);

	/* Low bit set means the GOT entries were already filled in by
	   an earlier relocation against the same symbol.  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    bool need_relocs = false;
	    Elf_Internal_Rela outrel;
	    /* NOTE(review): narrowing bfd_vma to int — assumes GOT
	       offsets stay below 2GB; confirm this matches the rest of
	       the backend.  */
	    int cur_off = off;

	    /* The GOT entries have not been initialized yet.  Do it
	       now, and emit any relocations.  If both an IE GOT and a
	       GD GOT are necessary, we emit the GD first.  */

	    if ((bfd_link_dll (info) || indx != 0)
		&& (h == NULL
		    || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			&& !resolved_to_zero)
		    || h->root.type != bfd_link_hash_undefweak))
	      {
		need_relocs = true;
		BFD_ASSERT (srelgot != NULL);
	      }

	    if (tls_type & GOT_TLS_GDESC)
	      {
		bfd_byte *loc;

		/* We should have relaxed, unless this is an undefined
		   weak symbol.  */
		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
			    || bfd_link_dll (info));
		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
			    <= globals->root.sgotplt->size);

		outrel.r_addend = 0;
		outrel.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + offplt
				   + globals->sgotplt_jump_table_size);

		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
		sreloc = globals->root.srelplt;
		loc = sreloc->contents;
		/* Descriptor relocs are appended to .rel(a).plt in
		   allocation order.  */
		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
		BFD_ASSERT (loc + RELOC_SIZE (globals)
			    <= sreloc->contents + sreloc->size);

		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);

		/* For globals, the first word in the relocation gets
		   the relocation index and the top bit set, or zero,
		   if we're binding now.  For locals, it gets the
		   symbol's offset in the tls section.  */
		bfd_put_32 (output_bfd,
			    !h ? value - elf_hash_table (info)->tls_sec->vma
			    : info->flags & DF_BIND_NOW ? 0
			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size);

		/* Second word in the relocation is always zero.  */
		bfd_put_32 (output_bfd, 0,
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size + 4);
	      }
	    if (tls_type & GOT_TLS_GD)
	      {
		if (need_relocs)
		  {
		    /* GD needs a module-ID reloc and, for non-local
		       symbols, a DTPOFF reloc on the second word.  */
		    outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);

		    if (indx == 0)
		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
				  sgot->contents + cur_off + 4);
		    else
		      {
			outrel.r_addend = 0;
			outrel.r_info = ELF32_R_INFO (indx,
						      R_ARM_TLS_DTPOFF32);
			outrel.r_offset += 4;

			if (globals->use_rel)
			  bfd_put_32 (output_bfd, outrel.r_addend,
				      sgot->contents + cur_off + 4);

			elf32_arm_add_dynreloc (output_bfd, info,
						srelgot, &outrel);
		      }
		  }
		else
		  {
		    /* If we are not emitting relocations for a
		       general dynamic reference, then we must be in a
		       static link or an executable link with the
		       symbol binding locally.  Mark it as belonging
		       to module 1, the executable.  */
		    bfd_put_32 (output_bfd, 1,
				sgot->contents + cur_off);
		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
				sgot->contents + cur_off + 4);
		  }

		/* A GD entry occupies two GOT words.  */
		cur_off += 8;
	      }

	    if (tls_type & GOT_TLS_IE)
	      {
		if (need_relocs)
		  {
		    if (indx == 0)
		      outrel.r_addend = value - dtpoff_base (info);
		    else
		      outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		  }
		else
		  /* Statically known: store the thread-pointer offset
		     directly.  */
		  bfd_put_32 (output_bfd, tpoff (info, value),
			      sgot->contents + cur_off);
		cur_off += 4;
	      }

	    if (h != NULL)
	      h->got.offset |= 1;
	    else
	      local_got_offsets[r_symndx] |= 1;
	  }

	/* If this is an IE-style reference but both GD and IE entries
	   exist, skip over the 8-byte GD entry to reach the IE word.  */
	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
	  off += 8;
	else if (tls_type & GOT_TLS_GDESC)
	  off = offplt;

	if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
	    || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL)
	  {
	    bfd_signed_vma offset;
	    /* TLS stubs are arm mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
	    branch_type = ST_BRANCH_TO_ARM;
	    enum elf32_arm_stub_type stub_type
	      = arm_type_of_stub (info, input_section, rel,
				  st_type, &branch_type,
				  (struct elf32_arm_link_hash_entry *)h,
				  globals->tls_trampoline, globals->root.splt,
				  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		struct elf32_arm_stub_hash_entry *stub_entry
		  = elf32_arm_get_stub_entry
		  (input_section, globals->root.splt, 0, rel,
		   globals, stub_type);
		offset = (stub_entry->stub_offset
			  + stub_entry->stub_sec->output_offset
			  + stub_entry->stub_sec->output_section->vma);
	      }
	    else
	      offset = (globals->root.splt->output_section->vma
			+ globals->root.splt->output_offset
			+ globals->tls_trampoline);

	    if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL)
	      {
		unsigned long inst;

		/* ARM branch: PC reads as instruction address + 8.  */
		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8);

		inst = offset >> 2;
		inst &= 0x00ffffff;
		/* BLX when available, else BL.  */
		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
	      }
	    else
	      {
		/* Thumb blx encodes the offset in a complicated
		   fashion.  */
		unsigned upper_insn, lower_insn;
		unsigned neg;

		/* Thumb branch: PC reads as instruction address + 4.  */
		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 4);

		if (stub_type != arm_stub_none
		    && arm_stub_is_thumb (stub_type))
		  {
		    lower_insn = 0xd000;
		  }
		else
		  {
		    lower_insn = 0xc000;
		    /* Round up the offset to a word boundary.  */
		    offset = (offset + 2) & ~2;
		  }

		neg = offset < 0;
		upper_insn = (0xf000
			      | ((offset >> 12) & 0x3ff)
			      | (neg << 10));
		/* J1/J2 bits are the I1/I2 bits XORed with the sign.  */
		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
			      | ((offset >> 1) & 0x7ff);
		bfd_put_16 (input_bfd, upper_insn, hit_data);
		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
		return bfd_reloc_ok;
	      }
	  }
	/* These relocations needs special care, as besides the fact
	   they point somewhere in .gotplt, the addend must be
	   adjusted accordingly depending on the type of instruction
	   we refer to.  */
	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
	  {
	    unsigned long data, insn;
	    unsigned thumb;

	    /* The stored word points back (with a Thumb bit in bit 0)
	       at the instruction consuming the descriptor address.  */
	    data = bfd_get_signed_32 (input_bfd, hit_data);
	    thumb = data & 1;
	    data &= ~1ul;

	    if (thumb)
	      {
		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
		/* 32-bit Thumb-2 encodings need the second halfword.  */
		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
		  insn = (insn << 16)
		    | bfd_get_16 (input_bfd,
				  contents + rel->r_offset - data + 2);
		if ((insn & 0xf800c000) == 0xf000c000)
		  /* bl/blx */
		  value = -6;
		else if ((insn & 0xffffff00) == 0x4400)
		  /* add */
		  value = -5;
		else
		  {
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "Thumb", insn);
		    return bfd_reloc_notsupported;
		  }
	      }
	    else
	      {
		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);

		switch (insn >> 24)
		  {
		  case 0xeb:  /* bl */
		  case 0xfa:  /* blx */
		    value = -4;
		    break;

		  case 0xe0:	/* add */
		    value = -8;
		    break;

		  default:
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "ARM", insn);
		    return bfd_reloc_notsupported;
		  }
	      }

	    /* Resolve PC-relative to the .got.plt descriptor slot.  */
	    value += ((globals->root.sgotplt->output_section->vma
		       + globals->root.sgotplt->output_offset + off)
		      - (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset)
		      + globals->sgotplt_jump_table_size);
	  }
	else
	  /* GD/IE: resolve PC-relative to the .got entry.  */
	  value = ((globals->root.sgot->output_section->vma
		    + globals->root.sgot->output_offset + off)
		   - (input_section->output_section->vma
		      + input_section->output_offset + rel->r_offset));

	if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
				 r_type == R_ARM_TLS_IE32_FDPIC))
	  {
	    /* For FDPIC relocations, resolve to the offset of the GOT
	       entry from the start of GOT.  */
	    bfd_put_32 (output_bfd,
			globals->root.sgot->output_offset + off,
			contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  {
	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
					     contents, rel->r_offset, value,
					     rel->r_addend);
	  }
      }
    case R_ARM_TLS_LE32:
      /* Local-exec TLS: the thread-pointer offset is fixed at link
	 time, which is impossible in a shared object — reject it
	 there.  */
      if (bfd_link_dll (info))
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
	       "in shared object"),
	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
	  return bfd_reloc_notsupported;
	}
      else
	value = tpoff (info, value);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
    case R_ARM_V4BX:
      /* Rewrite ARMv4 'BX Rm' for targets lacking BX: either branch to
	 an interworking veneer (fix_v4bx == 2) or degrade to
	 'MOV PC, Rm'.  */
      if (globals->fix_v4bx)
	{
	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	  /* Ensure that we have a BX instruction.  */
	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);

	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
	    {
	      /* Branch to veneer.  */
	      bfd_vma glue_addr;
	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
	      /* ARM PC reads as instruction address + 8.  */
	      glue_addr -= input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8;
	      /* Keep the condition code, substitute a B (0x0a000000).  */
	      insn = (insn & 0xf0000000) | 0x0a000000
		     | ((glue_addr >> 2) & 0x00ffffff);
	    }
	  else
	    {
	      /* Preserve Rm (lowest four bits) and the condition code
		 (highest four bits).  Other bits encode MOV PC,Rm.  */
	      insn = (insn & 0xf000000f) | 0x01a0f000;
	    }

	  bfd_put_32 (input_bfd, insn, hit_data);
	}
      return bfd_reloc_ok;
    case R_ARM_MOVW_ABS_NC:
    case R_ARM_MOVT_ABS:
    case R_ARM_MOVW_PREL_NC:
    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
    case R_ARM_MOVW_BREL_NC:
    case R_ARM_MOVW_BREL:
    case R_ARM_MOVT_BREL:
      /* ARM MOVW/MOVT: a 16-bit immediate split across imm4 (bits
	 16-19) and imm12 (bits 0-11).  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	if (globals->use_rel)
	  {
	    /* Reassemble the 16-bit addend from the insn fields, then
	       sign-extend it.  */
	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	/* Only the checked (non-_NC) MOVW flavour can overflow.  */
	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants insert the high halfword.  */
	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
	    || r_type == R_ARM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfff0f000;
	insn |= value & 0xfff;
	insn |= (value & 0xf000) << 4;
	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
    case R_ARM_THM_MOVW_ABS_NC:
    case R_ARM_THM_MOVT_ABS:
    case R_ARM_THM_MOVW_PREL_NC:
    case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
    case R_ARM_THM_MOVW_BREL_NC:
    case R_ARM_THM_MOVW_BREL:
    case R_ARM_THM_MOVT_BREL:
      /* Thumb-2 MOVW/MOVT: 16-bit immediate split across imm4, i,
	 imm3 and imm8 fields of the two halfwords.  */
      {
	bfd_vma insn;

	insn = bfd_get_16 (input_bfd, hit_data) << 16;
	insn |= bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    /* Reassemble the 16-bit addend from the scattered fields,
	       then sign-extend it.  */
	    addend = ((insn >> 4) & 0xf000)
		     | ((insn >> 15) & 0x0800)
		     | ((insn >> 4) & 0x0700)
		     | (insn & 0x00ff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	/* Only the checked (non-_NC) MOVW flavour can overflow.  */
	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants insert the high halfword.  */
	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
	    || r_type == R_ARM_THM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfbf08f00;
	insn |= (value & 0xf000) << 4;
	insn |= (value & 0x0800) << 15;
	insn |= (value & 0x0700) << 4;
	insn |= (value & 0x00ff);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
      }
      return bfd_reloc_ok;
    case R_ARM_ALU_PC_G0_NC:
    case R_ARM_ALU_PC_G1_NC:
    case R_ARM_ALU_PC_G0:
    case R_ARM_ALU_PC_G1:
    case R_ARM_ALU_PC_G2:
    case R_ARM_ALU_SB_G0_NC:
    case R_ARM_ALU_SB_G1_NC:
    case R_ARM_ALU_SB_G0:
    case R_ARM_ALU_SB_G1:
    case R_ARM_ALU_SB_G2:
      /* AAELF32 group relocations on ADD/SUB: select group n of the
	 (PC- or SB-relative) offset and encode it as an ARM
	 rotated-immediate operand.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_vma g_n;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which group of bits to select.  */
	switch (r_type)
	  {
	  case R_ARM_ALU_PC_G0_NC:
	  case R_ARM_ALU_PC_G0:
	  case R_ARM_ALU_SB_G0_NC:
	  case R_ARM_ALU_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_ALU_PC_G1_NC:
	  case R_ARM_ALU_PC_G1:
	  case R_ARM_ALU_SB_G1_NC:
	  case R_ARM_ALU_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_ALU_PC_G2:
	  case R_ARM_ALU_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative;
	    bfd_vma constant = insn & 0xff;
	    bfd_vma rotation = (insn & 0xf00) >> 8;

	    if (rotation == 0)
	      signed_addend = constant;
	    else
	      {
		/* Compensate for the fact that in the instruction, the
		   rotation is stored in multiples of 2 bits.  */
		rotation *= 2;

		/* Rotate "constant" right by "rotation" bits.  */
		signed_addend = (constant >> rotation) |
				(constant << (8 * sizeof (bfd_vma) - rotation));
	      }

	    /* Determine if the instruction is an ADD or a SUB.
	       (For REL, this determines the sign of the addend.)  */
	    negative = identify_add_or_sub (insn);
	    if (negative == 0)
	      {
		_bfd_error_handler
		  /* xgettext:c-format */
		  (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
		     "are allowed for ALU group relocations"),
		  input_bfd, input_section, (uint64_t) rel->r_offset);
		return bfd_reloc_overflow;
	      }

	    signed_addend *= negative;
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_ALU_PC_G0_NC
	    || r_type == R_ARM_ALU_PC_G1_NC
	    || r_type == R_ARM_ALU_PC_G0
	    || r_type == R_ARM_ALU_PC_G1
	    || r_type == R_ARM_ALU_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* If the target symbol is a Thumb function, then set the
	   Thumb bit in the address.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  signed_value |= 1;

	/* Calculate the value of the relevant G_n, in encoded
	   constant-with-rotation format.  */
	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
					  group, &residual);

	/* Check for overflow if required.  */
	if ((r_type == R_ARM_ALU_PC_G0
	     || r_type == R_ARM_ALU_PC_G1
	     || r_type == R_ARM_ALU_PC_G2
	     || r_type == R_ARM_ALU_SB_G0
	     || r_type == R_ARM_ALU_SB_G1
	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and the ADD/SUB part of the opcode; take care
	   not to destroy the S bit.  */
	insn &= 0xff1ff000;

	/* Set the opcode according to whether the value to go in the
	   place is negative.  */
	if (signed_value < 0)
	  insn |= 1 << 22;
	else
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= g_n;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
    case R_ARM_LDR_PC_G0:
    case R_ARM_LDR_PC_G1:
    case R_ARM_LDR_PC_G2:
    case R_ARM_LDR_SB_G0:
    case R_ARM_LDR_SB_G1:
    case R_ARM_LDR_SB_G2:
      /* Group relocations on LDR/STR: the residual left after removing
	 groups 0..n-1 goes into the 12-bit offset field; the U bit
	 encodes its sign.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDR_PC_G0:
	  case R_ARM_LDR_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDR_PC_G1:
	  case R_ARM_LDR_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDR_PC_G2:
	  case R_ARM_LDR_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    /* U bit (bit 23) gives the sign of the 12-bit offset.  */
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * (insn & 0xfff);
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDR_PC_G0
	    || r_type == R_ARM_LDR_PC_G1
	    || r_type == R_ARM_LDR_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  */
	if (residual >= 0x1000)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7ff000;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= residual;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
    case R_ARM_LDRS_PC_G0:
    case R_ARM_LDRS_PC_G1:
    case R_ARM_LDRS_PC_G2:
    case R_ARM_LDRS_SB_G0:
    case R_ARM_LDRS_SB_G1:
    case R_ARM_LDRS_SB_G2:
      /* Group relocations on LDRD/STRD/LDRH etc.: the residual goes
	 into the split 8-bit immediate (two 4-bit nibbles); the U bit
	 encodes its sign.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDRS_PC_G0:
	  case R_ARM_LDRS_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDRS_PC_G1:
	  case R_ARM_LDRS_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDRS_PC_G2:
	  case R_ARM_LDRS_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    /* U bit (bit 23) gives the sign; the 8-bit immediate is
	       split across bits 8-11 and 0-3.  */
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDRS_PC_G0
	    || r_type == R_ARM_LDRS_PC_G1
	    || r_type == R_ARM_LDRS_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  */
	if (residual >= 0x100)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7ff0f0;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= ((residual & 0xf0) << 4) | (residual & 0xf);

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
  10755. case R_ARM_LDC_PC_G0:
  10756. case R_ARM_LDC_PC_G1:
  10757. case R_ARM_LDC_PC_G2:
  10758. case R_ARM_LDC_SB_G0:
  10759. case R_ARM_LDC_SB_G1:
  10760. case R_ARM_LDC_SB_G2:
  10761. {
  10762. bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
  10763. bfd_vma pc = input_section->output_section->vma
  10764. + input_section->output_offset + rel->r_offset;
  10765. /* sb is the origin of the *segment* containing the symbol. */
  10766. bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
  10767. bfd_vma residual;
  10768. bfd_signed_vma signed_value;
  10769. int group = 0;
  10770. /* Determine which groups of bits to calculate. */
  10771. switch (r_type)
  10772. {
  10773. case R_ARM_LDC_PC_G0:
  10774. case R_ARM_LDC_SB_G0:
  10775. group = 0;
  10776. break;
  10777. case R_ARM_LDC_PC_G1:
  10778. case R_ARM_LDC_SB_G1:
  10779. group = 1;
  10780. break;
  10781. case R_ARM_LDC_PC_G2:
  10782. case R_ARM_LDC_SB_G2:
  10783. group = 2;
  10784. break;
  10785. default:
  10786. abort ();
  10787. }
  10788. /* If REL, extract the addend from the insn. If RELA, it will
  10789. have already been fetched for us. */
  10790. if (globals->use_rel)
  10791. {
  10792. int negative = (insn & (1 << 23)) ? 1 : -1;
  10793. signed_addend = negative * ((insn & 0xff) << 2);
  10794. }
  10795. /* Compute the value (X) to go in the place. */
  10796. if (r_type == R_ARM_LDC_PC_G0
  10797. || r_type == R_ARM_LDC_PC_G1
  10798. || r_type == R_ARM_LDC_PC_G2)
  10799. /* PC relative. */
  10800. signed_value = value - pc + signed_addend;
  10801. else
  10802. /* Section base relative. */
  10803. signed_value = value - sb + signed_addend;
  10804. /* Calculate the value of the relevant G_{n-1} to obtain
  10805. the residual at that stage. */
  10806. calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
  10807. group - 1, &residual);
  10808. /* Check for overflow. (The absolute value to go in the place must be
  10809. divisible by four and, after having been divided by four, must
  10810. fit in eight bits.) */
  10811. if ((residual & 0x3) != 0 || residual >= 0x400)
  10812. {
  10813. _bfd_error_handler
  10814. /* xgettext:c-format */
  10815. (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
  10816. "splitting %#" PRIx64 " for group relocation %s"),
  10817. input_bfd, input_section, (uint64_t) rel->r_offset,
  10818. (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
  10819. howto->name);
  10820. return bfd_reloc_overflow;
  10821. }
  10822. /* Mask out the value and U bit. */
  10823. insn &= 0xff7fff00;
  10824. /* Set the U bit if the value to go in the place is non-negative. */
  10825. if (signed_value >= 0)
  10826. insn |= 1 << 23;
  10827. /* Encode the offset. */
  10828. insn |= residual >> 2;
  10829. bfd_put_32 (input_bfd, insn, hit_data);
  10830. }
  10831. return bfd_reloc_ok;
  10832. case R_ARM_THM_ALU_ABS_G0_NC:
  10833. case R_ARM_THM_ALU_ABS_G1_NC:
  10834. case R_ARM_THM_ALU_ABS_G2_NC:
  10835. case R_ARM_THM_ALU_ABS_G3_NC:
  10836. {
  10837. const int shift_array[4] = {0, 8, 16, 24};
  10838. bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
  10839. bfd_vma addr = value;
  10840. int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
  10841. /* Compute address. */
  10842. if (globals->use_rel)
  10843. signed_addend = insn & 0xff;
  10844. addr += signed_addend;
  10845. if (branch_type == ST_BRANCH_TO_THUMB)
  10846. addr |= 1;
  10847. /* Clean imm8 insn. */
  10848. insn &= 0xff00;
  10849. /* And update with correct part of address. */
  10850. insn |= (addr >> shift) & 0xff;
  10851. /* Update insn. */
  10852. bfd_put_16 (input_bfd, insn, hit_data);
  10853. }
  10854. *unresolved_reloc_p = false;
  10855. return bfd_reloc_ok;
  10856. case R_ARM_GOTOFFFUNCDESC:
  10857. {
  10858. if (h == NULL)
  10859. {
  10860. struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
  10861. int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
  10862. if (r_symndx >= elf32_arm_num_entries (input_bfd))
  10863. {
  10864. * error_message = _("local symbol index too big");
  10865. return bfd_reloc_dangerous;
  10866. }
  10867. int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
  10868. bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
  10869. bfd_vma seg = -1;
  10870. if (bfd_link_pic (info) && dynindx == 0)
  10871. {
  10872. * error_message = _("no dynamic index information available");
  10873. return bfd_reloc_dangerous;
  10874. }
  10875. /* Resolve relocation. */
  10876. bfd_put_32 (output_bfd, (offset + sgot->output_offset)
  10877. , contents + rel->r_offset);
  10878. /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
  10879. not done yet. */
  10880. arm_elf_fill_funcdesc (output_bfd, info,
  10881. &local_fdpic_cnts[r_symndx].funcdesc_offset,
  10882. dynindx, offset, addr, dynreloc_value, seg);
  10883. }
  10884. else
  10885. {
  10886. int dynindx;
  10887. int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
  10888. bfd_vma addr;
  10889. bfd_vma seg = -1;
  10890. /* For static binaries, sym_sec can be null. */
  10891. if (sym_sec)
  10892. {
  10893. dynindx = elf_section_data (sym_sec->output_section)->dynindx;
  10894. addr = dynreloc_value - sym_sec->output_section->vma;
  10895. }
  10896. else
  10897. {
  10898. dynindx = 0;
  10899. addr = 0;
  10900. }
  10901. if (bfd_link_pic (info) && dynindx == 0)
  10902. {
  10903. * error_message = _("no dynamic index information available");
  10904. return bfd_reloc_dangerous;
  10905. }
  10906. /* This case cannot occur since funcdesc is allocated by
  10907. the dynamic loader so we cannot resolve the relocation. */
  10908. if (h->dynindx != -1)
  10909. {
  10910. * error_message = _("invalid dynamic index");
  10911. return bfd_reloc_dangerous;
  10912. }
  10913. /* Resolve relocation. */
  10914. bfd_put_32 (output_bfd, (offset + sgot->output_offset),
  10915. contents + rel->r_offset);
  10916. /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
  10917. arm_elf_fill_funcdesc (output_bfd, info,
  10918. &eh->fdpic_cnts.funcdesc_offset,
  10919. dynindx, offset, addr, dynreloc_value, seg);
  10920. }
  10921. }
  10922. *unresolved_reloc_p = false;
  10923. return bfd_reloc_ok;
  10924. case R_ARM_GOTFUNCDESC:
  10925. {
  10926. if (h != NULL)
  10927. {
  10928. Elf_Internal_Rela outrel;
  10929. /* Resolve relocation. */
  10930. bfd_put_32 (output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
  10931. + sgot->output_offset),
  10932. contents + rel->r_offset);
  10933. /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
  10934. if (h->dynindx == -1)
  10935. {
  10936. int dynindx;
  10937. int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
  10938. bfd_vma addr;
  10939. bfd_vma seg = -1;
  10940. /* For static binaries sym_sec can be null. */
  10941. if (sym_sec)
  10942. {
  10943. dynindx = elf_section_data (sym_sec->output_section)->dynindx;
  10944. addr = dynreloc_value - sym_sec->output_section->vma;
  10945. }
  10946. else
  10947. {
  10948. dynindx = 0;
  10949. addr = 0;
  10950. }
  10951. /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
  10952. arm_elf_fill_funcdesc (output_bfd, info,
  10953. &eh->fdpic_cnts.funcdesc_offset,
  10954. dynindx, offset, addr, dynreloc_value, seg);
  10955. }
  10956. /* Add a dynamic relocation on GOT entry if not already done. */
  10957. if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
  10958. {
  10959. if (h->dynindx == -1)
  10960. {
  10961. outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
  10962. if (h->root.type == bfd_link_hash_undefweak)
  10963. bfd_put_32 (output_bfd, 0, sgot->contents
  10964. + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
  10965. else
  10966. bfd_put_32 (output_bfd, sgot->output_section->vma
  10967. + sgot->output_offset
  10968. + (eh->fdpic_cnts.funcdesc_offset & ~1),
  10969. sgot->contents
  10970. + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
  10971. }
  10972. else
  10973. {
  10974. outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
  10975. }
  10976. outrel.r_offset = sgot->output_section->vma
  10977. + sgot->output_offset
  10978. + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
  10979. outrel.r_addend = 0;
  10980. if (h->dynindx == -1 && !bfd_link_pic (info))
  10981. if (h->root.type == bfd_link_hash_undefweak)
  10982. arm_elf_add_rofixup (output_bfd, globals->srofixup, -1);
  10983. else
  10984. arm_elf_add_rofixup (output_bfd, globals->srofixup,
  10985. outrel.r_offset);
  10986. else
  10987. elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
  10988. eh->fdpic_cnts.gotfuncdesc_offset |= 1;
  10989. }
  10990. }
  10991. else
  10992. {
  10993. /* Such relocation on static function should not have been
  10994. emitted by the compiler. */
  10995. return bfd_reloc_notsupported;
  10996. }
  10997. }
  10998. *unresolved_reloc_p = false;
  10999. return bfd_reloc_ok;
  11000. case R_ARM_FUNCDESC:
  11001. {
  11002. if (h == NULL)
  11003. {
  11004. struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
  11005. Elf_Internal_Rela outrel;
  11006. int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
  11007. if (r_symndx >= elf32_arm_num_entries (input_bfd))
  11008. {
  11009. * error_message = _("local symbol index too big");
  11010. return bfd_reloc_dangerous;
  11011. }
  11012. int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
  11013. bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
  11014. bfd_vma seg = -1;
  11015. if (bfd_link_pic (info) && dynindx == 0)
  11016. {
  11017. * error_message = _("dynamic index information not available");
  11018. return bfd_reloc_dangerous;
  11019. }
  11020. /* Replace static FUNCDESC relocation with a
  11021. R_ARM_RELATIVE dynamic relocation or with a rofixup for
  11022. executable. */
  11023. outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
  11024. outrel.r_offset = input_section->output_section->vma
  11025. + input_section->output_offset + rel->r_offset;
  11026. outrel.r_addend = 0;
  11027. if (bfd_link_pic (info))
  11028. elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
  11029. else
  11030. arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
  11031. bfd_put_32 (input_bfd, sgot->output_section->vma
  11032. + sgot->output_offset + offset, hit_data);
  11033. /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
  11034. arm_elf_fill_funcdesc (output_bfd, info,
  11035. &local_fdpic_cnts[r_symndx].funcdesc_offset,
  11036. dynindx, offset, addr, dynreloc_value, seg);
  11037. }
  11038. else
  11039. {
  11040. if (h->dynindx == -1)
  11041. {
  11042. int dynindx;
  11043. int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
  11044. bfd_vma addr;
  11045. bfd_vma seg = -1;
  11046. Elf_Internal_Rela outrel;
  11047. /* For static binaries sym_sec can be null. */
  11048. if (sym_sec)
  11049. {
  11050. dynindx = elf_section_data (sym_sec->output_section)->dynindx;
  11051. addr = dynreloc_value - sym_sec->output_section->vma;
  11052. }
  11053. else
  11054. {
  11055. dynindx = 0;
  11056. addr = 0;
  11057. }
  11058. if (bfd_link_pic (info) && dynindx == 0)
  11059. abort ();
  11060. /* Replace static FUNCDESC relocation with a
  11061. R_ARM_RELATIVE dynamic relocation. */
  11062. outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
  11063. outrel.r_offset = input_section->output_section->vma
  11064. + input_section->output_offset + rel->r_offset;
  11065. outrel.r_addend = 0;
  11066. if (bfd_link_pic (info))
  11067. elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
  11068. else
  11069. arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
  11070. bfd_put_32 (input_bfd, sgot->output_section->vma
  11071. + sgot->output_offset + offset, hit_data);
  11072. /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
  11073. arm_elf_fill_funcdesc (output_bfd, info,
  11074. &eh->fdpic_cnts.funcdesc_offset,
  11075. dynindx, offset, addr, dynreloc_value, seg);
  11076. }
  11077. else
  11078. {
  11079. Elf_Internal_Rela outrel;
  11080. /* Add a dynamic relocation. */
  11081. outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
  11082. outrel.r_offset = input_section->output_section->vma
  11083. + input_section->output_offset + rel->r_offset;
  11084. outrel.r_addend = 0;
  11085. elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
  11086. }
  11087. }
  11088. }
  11089. *unresolved_reloc_p = false;
  11090. return bfd_reloc_ok;
  11091. case R_ARM_THM_BF16:
  11092. {
  11093. bfd_vma relocation;
  11094. bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
  11095. bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
  11096. if (globals->use_rel)
  11097. {
  11098. bfd_vma immA = (upper_insn & 0x001f);
  11099. bfd_vma immB = (lower_insn & 0x07fe) >> 1;
  11100. bfd_vma immC = (lower_insn & 0x0800) >> 11;
  11101. addend = (immA << 12);
  11102. addend |= (immB << 2);
  11103. addend |= (immC << 1);
  11104. addend |= 1;
  11105. /* Sign extend. */
  11106. signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
  11107. }
  11108. relocation = value + signed_addend;
  11109. relocation -= (input_section->output_section->vma
  11110. + input_section->output_offset
  11111. + rel->r_offset);
  11112. /* Put RELOCATION back into the insn. */
  11113. {
  11114. bfd_vma immA = (relocation & 0x0001f000) >> 12;
  11115. bfd_vma immB = (relocation & 0x00000ffc) >> 2;
  11116. bfd_vma immC = (relocation & 0x00000002) >> 1;
  11117. upper_insn = (upper_insn & 0xffe0) | immA;
  11118. lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
  11119. }
  11120. /* Put the relocated value back in the object file: */
  11121. bfd_put_16 (input_bfd, upper_insn, hit_data);
  11122. bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
  11123. return bfd_reloc_ok;
  11124. }
  11125. case R_ARM_THM_BF12:
  11126. {
  11127. bfd_vma relocation;
  11128. bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
  11129. bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
  11130. if (globals->use_rel)
  11131. {
  11132. bfd_vma immA = (upper_insn & 0x0001);
  11133. bfd_vma immB = (lower_insn & 0x07fe) >> 1;
  11134. bfd_vma immC = (lower_insn & 0x0800) >> 11;
  11135. addend = (immA << 12);
  11136. addend |= (immB << 2);
  11137. addend |= (immC << 1);
  11138. addend |= 1;
  11139. /* Sign extend. */
  11140. addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
  11141. signed_addend = addend;
  11142. }
  11143. relocation = value + signed_addend;
  11144. relocation -= (input_section->output_section->vma
  11145. + input_section->output_offset
  11146. + rel->r_offset);
  11147. /* Put RELOCATION back into the insn. */
  11148. {
  11149. bfd_vma immA = (relocation & 0x00001000) >> 12;
  11150. bfd_vma immB = (relocation & 0x00000ffc) >> 2;
  11151. bfd_vma immC = (relocation & 0x00000002) >> 1;
  11152. upper_insn = (upper_insn & 0xfffe) | immA;
  11153. lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
  11154. }
  11155. /* Put the relocated value back in the object file: */
  11156. bfd_put_16 (input_bfd, upper_insn, hit_data);
  11157. bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
  11158. return bfd_reloc_ok;
  11159. }
  11160. case R_ARM_THM_BF18:
  11161. {
  11162. bfd_vma relocation;
  11163. bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
  11164. bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
  11165. if (globals->use_rel)
  11166. {
  11167. bfd_vma immA = (upper_insn & 0x007f);
  11168. bfd_vma immB = (lower_insn & 0x07fe) >> 1;
  11169. bfd_vma immC = (lower_insn & 0x0800) >> 11;
  11170. addend = (immA << 12);
  11171. addend |= (immB << 2);
  11172. addend |= (immC << 1);
  11173. addend |= 1;
  11174. /* Sign extend. */
  11175. addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
  11176. signed_addend = addend;
  11177. }
  11178. relocation = value + signed_addend;
  11179. relocation -= (input_section->output_section->vma
  11180. + input_section->output_offset
  11181. + rel->r_offset);
  11182. /* Put RELOCATION back into the insn. */
  11183. {
  11184. bfd_vma immA = (relocation & 0x0007f000) >> 12;
  11185. bfd_vma immB = (relocation & 0x00000ffc) >> 2;
  11186. bfd_vma immC = (relocation & 0x00000002) >> 1;
  11187. upper_insn = (upper_insn & 0xff80) | immA;
  11188. lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
  11189. }
  11190. /* Put the relocated value back in the object file: */
  11191. bfd_put_16 (input_bfd, upper_insn, hit_data);
  11192. bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
  11193. return bfd_reloc_ok;
  11194. }
  11195. default:
  11196. return bfd_reloc_notsupported;
  11197. }
  11198. }
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.

   Used during relocatable (-r) links on REL targets, where the addend
   lives in the instruction itself rather than in the reloc entry: the
   in-place addend is extracted, adjusted by INCREMENT, and written back
   into the instruction bits described by HOWTO.  */

static void
arm_add_to_rel (bfd *		 abfd,
		bfd_byte *	 address,
		reloc_howto_type * howto,
		bfd_signed_vma	 increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      /* Thumb BL/B.W: the branch offset is split across two 16-bit
	 halfwords, 11 bits in each.  */
      int upper_insn, lower_insn;
      int upper, lower;

      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);

      /* Extract the two 11-bit offset fields.  */
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      /* Reassemble the (halfword-scaled) offset, apply the increment,
	 then scale back down by one bit before re-splitting.
	 NOTE(review): the extracted addend is not sign-extended before
	 INCREMENT is added; the subsequent masking discards the high
	 bits, which appears to rely on offsets staying in range —
	 confirm against the upstream history if this ever overflows.  */
      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      /* Re-insert the two 11-bit halves, preserving the opcode bits.  */
      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  /* The top bit of the field is set: manually sign-extend the
	     addend to the full width of bfd_signed_vma.  */
	  bfd_signed_vma mask;

	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  /* These store a word-scaled offset: scale it up to bytes
	     before adjusting, then drop the undesired low bits again.
	     NOTE(review): this uses howto->size as the left-shift count;
	     the meaning of the size field changed across binutils
	     releases (log2 vs. bytes) — confirm it matches this tree's
	     reloc_howto_type definition.  */
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      /* Splice the adjusted addend back into the instruction word.  */
      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
/* True if R_TYPE is any ARM TLS relocation, covering both the
   traditional general-dynamic/local-dynamic/initial-exec/local-exec
   relocations (including their FDPIC variants) and the GNU
   TLS-descriptor dialect tested by IS_ARM_TLS_GNU_RELOC below.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC  \
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
/* Relocate an ARM ELF section.

   Standard BFD backend hook: walk every relocation in INPUT_SECTION,
   resolve the symbol each one refers to, and either adjust the reloc
   (for relocatable links) or apply it to CONTENTS via
   elf32_arm_final_link_relocate.  TLS relocations may first be relaxed
   to a cheaper access model by elf32_arm_tls_relax.  Returns true on
   success, false after reporting an error.  */

static int
elf32_arm_relocate_section (bfd *		   output_bfd,
			    struct bfd_link_info * info,
			    bfd *		   input_bfd,
			    asection *		   input_section,
			    bfd_byte *		   contents,
			    Elf_Internal_Rela *	   relocs,
			    Elf_Internal_Sym *	   local_syms,
			    asection **		   local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return false;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int			   r_type;
      reloc_howto_type *	   howto;
      unsigned long		   r_symndx;
      Elf_Internal_Sym *	   sym;
      asection *		   sec;
      struct elf_link_hash_entry * h;
      bfd_vma			   relocation;
      bfd_reloc_status_type	   r;
      arelent			   bfd_reloc;
      char			   sym_type;
      bool			   unresolved_reloc = false;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type   = ELF32_R_TYPE (rel->r_info);
      /* Map old/vendor reloc numbers to the canonical ones.  */
      r_type   = arm_real_reloc_type (globals, r_type);

      /* VTENTRY/VTINHERIT are garbage-collection markers only; they
	 produce no bits in the output.  */
      if (   r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);

      if (howto == NULL)
	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, true);

	  if (globals->use_rel)
	    {
	      /* REL format: compute the symbol's final address
		 directly; the addend lives in the section contents.  */
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  /* Section symbol in a SEC_MERGE section: the
		     in-place addend selects an entry that string/
		     constant merging may have moved.  Extract the
		     addend from the instruction encoding, remap it
		     through _bfd_elf_rel_local_sym, and write the
		     adjusted addend back.  */
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      /* ARM MOVW/MOVT: imm16 split as imm4:imm12.  */
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      /* Thumb-2 MOVW/MOVT: imm16 split as
			 imm4:i:imm3:imm8 across two halfwords.  */
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			<< 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			| ((value & 0x04000000) >> 15);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* The generic extraction below only handles a
			 contiguous, unshifted field.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#" PRIx64 "): "
			       "%s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (uint64_t) rel->r_offset, howto->name);
			  return false;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  /* Top bit set: sign-extend manually.  */
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  /* Remap the addend to the entry's post-merge
		     location, expressed relative to RELOCATION.  */
		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			| (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			| (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			| (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    /* RELA format: the generic helper handles merged-section
	       adjustment via the explicit addend.  */
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: resolve H, SEC, RELOCATION and the
	     unresolved/warned flags through the generic macro.  */
	  bool warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      /* Relocs against sections dropped by --gc-sections or
	 comdat elimination are zeroed out by this macro.  */
      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* NAME is used only for diagnostics below.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (sec);
	}

      /* Diagnose a TLS reloc applied to a non-TLS symbol, or vice
	 versa (warning only; processing continues).  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  _bfd_error_handler
	    ((sym_type == STT_TLS
	      /* xgettext:c-format */
	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
	      /* xgettext:c-format */
	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return false;
	}

      if (r != bfd_reloc_ok)
	{
	  /* Route each failure status to the matching linker
	     callback so the user gets a precise diagnostic.  */
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, true);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return true;
}
  11583. /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
  11584. adds the edit to the start of the list. (The list must be built in order of
  11585. ascending TINDEX: the function's callers are primarily responsible for
  11586. maintaining that condition). */
  11587. static void
  11588. add_unwind_table_edit (arm_unwind_table_edit **head,
  11589. arm_unwind_table_edit **tail,
  11590. arm_unwind_edit_type type,
  11591. asection *linked_section,
  11592. unsigned int tindex)
  11593. {
  11594. arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
  11595. xmalloc (sizeof (arm_unwind_table_edit));
  11596. new_edit->type = type;
  11597. new_edit->linked_section = linked_section;
  11598. new_edit->index = tindex;
  11599. if (tindex > 0)
  11600. {
  11601. new_edit->next = NULL;
  11602. if (*tail)
  11603. (*tail)->next = new_edit;
  11604. (*tail) = new_edit;
  11605. if (!*head)
  11606. (*head) = new_edit;
  11607. }
  11608. else
  11609. {
  11610. new_edit->next = *head;
  11611. if (!*tail)
  11612. *tail = new_edit;
  11613. *head = new_edit;
  11614. }
  11615. }
  11616. static _arm_elf_section_data *get_arm_elf_section_data (asection *);
  11617. /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
  11618. static void
  11619. adjust_exidx_size (asection *exidx_sec, int adjust)
  11620. {
  11621. asection *out_sec;
  11622. if (!exidx_sec->rawsize)
  11623. exidx_sec->rawsize = exidx_sec->size;
  11624. bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
  11625. out_sec = exidx_sec->output_section;
  11626. /* Adjust size of output section. */
  11627. bfd_set_section_size (out_sec, out_sec->size + adjust);
  11628. }
  11629. /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
  11630. static void
  11631. insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
  11632. {
  11633. struct _arm_elf_section_data *exidx_arm_data;
  11634. exidx_arm_data = get_arm_elf_section_data (exidx_sec);
  11635. add_unwind_table_edit
  11636. (&exidx_arm_data->u.exidx.unwind_edit_list,
  11637. &exidx_arm_data->u.exidx.unwind_edit_tail,
  11638. INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
  11639. exidx_arm_data->additional_reloc_count++;
  11640. adjust_exidx_size (exidx_sec, 8);
  11641. }
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

     1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
     2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
	codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   TEXT_SECTION_ORDER holds NUM_TEXT_SECTIONS text sections sorted by
   increasing VMA.  The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bool
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bool merge_exidx_entries)
{
  bfd *inp;
  /* Second (data) word of the previous inlined-unwind entry seen.  */
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  /* Kind of the previous table entry: -1 none yet, 0 CANTUNWIND,
     1 inlined unwind data, 2 normal (out-of-line) entry.  */
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  /* Terminate the previous region so the unwinder does not fall
	     through into this section.  */
	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Scan the table; each entry is two 32-bit words, the second of
	 which classifies the entry.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size (exidx_sec, - deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after (last_text_sec, last_exidx_sec);

  return true;
}
  11793. static bool
  11794. elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
  11795. bfd *ibfd, const char *name)
  11796. {
  11797. asection *sec, *osec;
  11798. sec = bfd_get_linker_section (ibfd, name);
  11799. if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
  11800. return true;
  11801. osec = sec->output_section;
  11802. if (elf32_arm_write_section (obfd, info, sec, sec->contents))
  11803. return true;
  11804. if (! bfd_set_section_contents (obfd, osec, sec->contents,
  11805. sec->output_offset, sec->size))
  11806. return false;
  11807. return true;
  11808. }
  11809. static bool
  11810. elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
  11811. {
  11812. struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
  11813. asection *sec, *osec;
  11814. if (globals == NULL)
  11815. return false;
  11816. /* Invoke the regular ELF backend linker to do all the work. */
  11817. if (!bfd_elf_final_link (abfd, info))
  11818. return false;
  11819. /* Process stub sections (eg BE8 encoding, ...). */
  11820. struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  11821. unsigned int i;
  11822. for (i=0; i<htab->top_id; i++)
  11823. {
  11824. sec = htab->stub_group[i].stub_sec;
  11825. /* Only process it once, in its link_sec slot. */
  11826. if (sec && i == htab->stub_group[i].link_sec->id)
  11827. {
  11828. osec = sec->output_section;
  11829. elf32_arm_write_section (abfd, info, sec, sec->contents);
  11830. if (! bfd_set_section_contents (abfd, osec, sec->contents,
  11831. sec->output_offset, sec->size))
  11832. return false;
  11833. }
  11834. }
  11835. /* Write out any glue sections now that we have created all the
  11836. stubs. */
  11837. if (globals->bfd_of_glue_owner != NULL)
  11838. {
  11839. if (! elf32_arm_output_glue_section (info, abfd,
  11840. globals->bfd_of_glue_owner,
  11841. ARM2THUMB_GLUE_SECTION_NAME))
  11842. return false;
  11843. if (! elf32_arm_output_glue_section (info, abfd,
  11844. globals->bfd_of_glue_owner,
  11845. THUMB2ARM_GLUE_SECTION_NAME))
  11846. return false;
  11847. if (! elf32_arm_output_glue_section (info, abfd,
  11848. globals->bfd_of_glue_owner,
  11849. VFP11_ERRATUM_VENEER_SECTION_NAME))
  11850. return false;
  11851. if (! elf32_arm_output_glue_section (info, abfd,
  11852. globals->bfd_of_glue_owner,
  11853. STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
  11854. return false;
  11855. if (! elf32_arm_output_glue_section (info, abfd,
  11856. globals->bfd_of_glue_owner,
  11857. ARM_BX_GLUE_SECTION_NAME))
  11858. return false;
  11859. }
  11860. return true;
  11861. }
  11862. /* Return a best guess for the machine number based on the attributes. */
  11863. static unsigned int
  11864. bfd_arm_get_mach_from_attributes (bfd * abfd)
  11865. {
  11866. int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
  11867. switch (arch)
  11868. {
  11869. case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
  11870. case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
  11871. case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
  11872. case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
  11873. case TAG_CPU_ARCH_V5TE:
  11874. {
  11875. char * name;
  11876. BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
  11877. name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
  11878. if (name)
  11879. {
  11880. if (strcmp (name, "IWMMXT2") == 0)
  11881. return bfd_mach_arm_iWMMXt2;
  11882. if (strcmp (name, "IWMMXT") == 0)
  11883. return bfd_mach_arm_iWMMXt;
  11884. if (strcmp (name, "XSCALE") == 0)
  11885. {
  11886. int wmmx;
  11887. BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
  11888. wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
  11889. switch (wmmx)
  11890. {
  11891. case 1: return bfd_mach_arm_iWMMXt;
  11892. case 2: return bfd_mach_arm_iWMMXt2;
  11893. default: return bfd_mach_arm_XScale;
  11894. }
  11895. }
  11896. }
  11897. return bfd_mach_arm_5TE;
  11898. }
  11899. case TAG_CPU_ARCH_V5TEJ:
  11900. return bfd_mach_arm_5TEJ;
  11901. case TAG_CPU_ARCH_V6:
  11902. return bfd_mach_arm_6;
  11903. case TAG_CPU_ARCH_V6KZ:
  11904. return bfd_mach_arm_6KZ;
  11905. case TAG_CPU_ARCH_V6T2:
  11906. return bfd_mach_arm_6T2;
  11907. case TAG_CPU_ARCH_V6K:
  11908. return bfd_mach_arm_6K;
  11909. case TAG_CPU_ARCH_V7:
  11910. return bfd_mach_arm_7;
  11911. case TAG_CPU_ARCH_V6_M:
  11912. return bfd_mach_arm_6M;
  11913. case TAG_CPU_ARCH_V6S_M:
  11914. return bfd_mach_arm_6SM;
  11915. case TAG_CPU_ARCH_V7E_M:
  11916. return bfd_mach_arm_7EM;
  11917. case TAG_CPU_ARCH_V8:
  11918. return bfd_mach_arm_8;
  11919. case TAG_CPU_ARCH_V8R:
  11920. return bfd_mach_arm_8R;
  11921. case TAG_CPU_ARCH_V8M_BASE:
  11922. return bfd_mach_arm_8M_BASE;
  11923. case TAG_CPU_ARCH_V8M_MAIN:
  11924. return bfd_mach_arm_8M_MAIN;
  11925. case TAG_CPU_ARCH_V8_1M_MAIN:
  11926. return bfd_mach_arm_8_1M_MAIN;
  11927. case TAG_CPU_ARCH_V9:
  11928. return bfd_mach_arm_9;
  11929. default:
  11930. /* Force entry to be added for any new known Tag_CPU_arch value. */
  11931. BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
  11932. /* Unknown Tag_CPU_arch value. */
  11933. return bfd_mach_arm_unknown;
  11934. }
  11935. }
  11936. /* Set the right machine number. */
  11937. static bool
  11938. elf32_arm_object_p (bfd *abfd)
  11939. {
  11940. unsigned int mach;
  11941. mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
  11942. if (mach == bfd_mach_arm_unknown)
  11943. {
  11944. if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
  11945. mach = bfd_mach_arm_ep9312;
  11946. else
  11947. mach = bfd_arm_get_mach_from_attributes (abfd);
  11948. }
  11949. bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
  11950. return true;
  11951. }
  11952. /* Function to keep ARM specific flags in the ELF header. */
  11953. static bool
  11954. elf32_arm_set_private_flags (bfd *abfd, flagword flags)
  11955. {
  11956. if (elf_flags_init (abfd)
  11957. && elf_elfheader (abfd)->e_flags != flags)
  11958. {
  11959. if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
  11960. {
  11961. if (flags & EF_ARM_INTERWORK)
  11962. _bfd_error_handler
  11963. (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
  11964. abfd);
  11965. else
  11966. _bfd_error_handler
  11967. (_("warning: clearing the interworking flag of %pB due to outside request"),
  11968. abfd);
  11969. }
  11970. }
  11971. else
  11972. {
  11973. elf_elfheader (abfd)->e_flags = flags;
  11974. elf_flags_init (abfd) = true;
  11975. }
  11976. return true;
  11977. }
  11978. /* Copy backend specific data from one object module to another. */
  11979. static bool
  11980. elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
  11981. {
  11982. flagword in_flags;
  11983. flagword out_flags;
  11984. if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
  11985. return true;
  11986. in_flags = elf_elfheader (ibfd)->e_flags;
  11987. out_flags = elf_elfheader (obfd)->e_flags;
  11988. if (elf_flags_init (obfd)
  11989. && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
  11990. && in_flags != out_flags)
  11991. {
  11992. /* Cannot mix APCS26 and APCS32 code. */
  11993. if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
  11994. return false;
  11995. /* Cannot mix float APCS and non-float APCS code. */
  11996. if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
  11997. return false;
  11998. /* If the src and dest have different interworking flags
  11999. then turn off the interworking bit. */
  12000. if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
  12001. {
  12002. if (out_flags & EF_ARM_INTERWORK)
  12003. _bfd_error_handler
  12004. (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
  12005. obfd, ibfd);
  12006. in_flags &= ~EF_ARM_INTERWORK;
  12007. }
  12008. /* Likewise for PIC, though don't warn for this case. */
  12009. if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
  12010. in_flags &= ~EF_ARM_PIC;
  12011. }
  12012. elf_elfheader (obfd)->e_flags = in_flags;
  12013. elf_flags_init (obfd) = true;
  12014. return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
  12015. }
/* Values for Tag_ABI_PCS_R9_use.  (See the AEABI build-attributes
   addenda for the authoritative definitions.)  */
enum
{
  AEABI_R9_V6,		/* R9 used as an ordinary variable register (v6).  */
  AEABI_R9_SB,		/* R9 used as the static base (SB).  */
  AEABI_R9_TLS,		/* R9 used as the thread-local storage pointer.  */
  AEABI_R9_unused	/* R9 not used by the code at all.  */
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* RW static data addressed absolutely.  */
  AEABI_PCS_RW_data_PCrel,	/* RW static data addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* RW static data addressed SB-relative.  */
  AEABI_PCS_RW_data_unused	/* No RW static data used.  */
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,		/* No enums used.  */
  AEABI_enum_short,		/* Enums packed into smallest container.  */
  AEABI_enum_wide,		/* Enums are always 32 bits.  */
  AEABI_enum_forced_wide	/* Enums 32 bits, forced for ABI conformance.  */
};
  12040. /* Determine whether an object attribute tag takes an integer, a
  12041. string or both. */
  12042. static int
  12043. elf32_arm_obj_attrs_arg_type (int tag)
  12044. {
  12045. if (tag == Tag_compatibility)
  12046. return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
  12047. else if (tag == Tag_nodefaults)
  12048. return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
  12049. else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
  12050. return ATTR_TYPE_FLAG_STR_VAL;
  12051. else if (tag < 32)
  12052. return ATTR_TYPE_FLAG_INT_VAL;
  12053. else
  12054. return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
  12055. }
  12056. /* The ABI defines that Tag_conformance should be emitted first, and that
  12057. Tag_nodefaults should be second (if either is defined). This sets those
  12058. two positions, and bumps up the position of all the remaining tags to
  12059. compensate. */
  12060. static int
  12061. elf32_arm_obj_attrs_order (int num)
  12062. {
  12063. if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
  12064. return Tag_conformance;
  12065. if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
  12066. return Tag_nodefaults;
  12067. if ((num - 2) < Tag_nodefaults)
  12068. return num - 2;
  12069. if ((num - 1) < Tag_conformance)
  12070. return num - 1;
  12071. return num;
  12072. }
  12073. /* Attribute numbers >=64 (mod 128) can be safely ignored. */
  12074. static bool
  12075. elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
  12076. {
  12077. if ((tag & 127) < 64)
  12078. {
  12079. _bfd_error_handler
  12080. (_("%pB: unknown mandatory EABI object attribute %d"),
  12081. abfd, tag);
  12082. bfd_set_error (bfd_error_bad_value);
  12083. return false;
  12084. }
  12085. else
  12086. {
  12087. _bfd_error_handler
  12088. (_("warning: %pB: unknown EABI object attribute %d"),
  12089. abfd, tag);
  12090. return true;
  12091. }
  12092. }
  12093. /* Read the architecture from the Tag_also_compatible_with attribute, if any.
  12094. Returns -1 if no architecture could be read. */
  12095. static int
  12096. get_secondary_compatible_arch (bfd *abfd)
  12097. {
  12098. obj_attribute *attr =
  12099. &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
  12100. /* Note: the tag and its argument below are uleb128 values, though
  12101. currently-defined values fit in one byte for each. */
  12102. if (attr->s
  12103. && attr->s[0] == Tag_CPU_arch
  12104. && (attr->s[1] & 128) != 128
  12105. && attr->s[2] == 0)
  12106. return attr->s[1];
  12107. /* This tag is "safely ignorable", so don't complain if it looks funny. */
  12108. return -1;
  12109. }
  12110. /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
  12111. The tag is removed if ARCH is -1. */
  12112. static void
  12113. set_secondary_compatible_arch (bfd *abfd, int arch)
  12114. {
  12115. obj_attribute *attr =
  12116. &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
  12117. if (arch == -1)
  12118. {
  12119. attr->s = NULL;
  12120. return;
  12121. }
  12122. /* Note: the tag and its argument below are uleb128 values, though
  12123. currently-defined values fit in one byte for each. */
  12124. if (!attr->s)
  12125. attr->s = (char *) bfd_alloc (abfd, 3);
  12126. attr->s[0] = Tag_CPU_arch;
  12127. attr->s[1] = arch;
  12128. attr->s[2] = '\0';
  12129. }
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.  OLDTAG is the output's current Tag_CPU_arch, NEWTAG the
   input's; SECONDARY_COMPAT is the input's Tag_also_compatible_with
   architecture (or -1) and *SECONDARY_COMPAT_OUT the output's, which is
   updated on return.  Returns the merged architecture tag, or -1 (after
   reporting an error against IBFD) if the architectures conflict.  */

static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each vN[] table below gives, for a "higher" architecture N, the merge
     result when combined with the architecture whose tag indexes the
     table.  -1 marks an incompatible combination.  */
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8),     /* V8.  */
      T(V8),     /* V8-R.  */
      T(V8),     /* V8-M.BASE.  */
      T(V8),     /* V8-M.MAIN.  */
      T(V8),     /* V8.1.  */
      T(V8),     /* V8.2.  */
      T(V8),     /* V8.3.  */
      T(V8),     /* V8.1-M.MAIN.  */
    };
  const int v8r[] =
    {
      T(V8R),    /* PRE_V4.  */
      T(V8R),    /* V4.  */
      T(V8R),    /* V4T.  */
      T(V8R),    /* V5T.  */
      T(V8R),    /* V5TE.  */
      T(V8R),    /* V5TEJ.  */
      T(V8R),    /* V6.  */
      T(V8R),    /* V6KZ.  */
      T(V8R),    /* V6T2.  */
      T(V8R),    /* V6K.  */
      T(V8R),    /* V7.  */
      T(V8R),    /* V6_M.  */
      T(V8R),    /* V6S_M.  */
      T(V8R),    /* V7E_M.  */
      T(V8),     /* V8.  */
      T(V8R),    /* V8R.  */
    };
  const int v8m_baseline[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      -1,        /* V4T.  */
      -1,        /* V5T.  */
      -1,        /* V5TE.  */
      -1,        /* V5TEJ.  */
      -1,        /* V6.  */
      -1,        /* V6KZ.  */
      -1,        /* V6T2.  */
      -1,        /* V6K.  */
      -1,        /* V7.  */
      T(V8M_BASE), /* V6_M.  */
      T(V8M_BASE), /* V6S_M.  */
      -1,        /* V7E_M.  */
      -1,        /* V8.  */
      -1,        /* V8R.  */
      T(V8M_BASE) /* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      -1,        /* V4T.  */
      -1,        /* V5T.  */
      -1,        /* V5TE.  */
      -1,        /* V5TEJ.  */
      -1,        /* V6.  */
      -1,        /* V6KZ.  */
      -1,        /* V6T2.  */
      -1,        /* V6K.  */
      T(V8M_MAIN), /* V7.  */
      T(V8M_MAIN), /* V6_M.  */
      T(V8M_MAIN), /* V6S_M.  */
      T(V8M_MAIN), /* V7E_M.  */
      -1,        /* V8.  */
      -1,        /* V8R.  */
      T(V8M_MAIN), /* V8-M BASELINE.  */
      T(V8M_MAIN) /* V8-M MAINLINE.  */
    };
  const int v8_1m_mainline[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      -1,        /* V4T.  */
      -1,        /* V5T.  */
      -1,        /* V5TE.  */
      -1,        /* V5TEJ.  */
      -1,        /* V6.  */
      -1,        /* V6KZ.  */
      -1,        /* V6T2.  */
      -1,        /* V6K.  */
      T(V8_1M_MAIN), /* V7.  */
      T(V8_1M_MAIN), /* V6_M.  */
      T(V8_1M_MAIN), /* V6S_M.  */
      T(V8_1M_MAIN), /* V7E_M.  */
      -1,        /* V8.  */
      -1,        /* V8R.  */
      T(V8_1M_MAIN), /* V8-M BASELINE.  */
      T(V8_1M_MAIN), /* V8-M MAINLINE.  */
      -1,        /* Unused (18).  */
      -1,        /* Unused (19).  */
      -1,        /* Unused (20).  */
      T(V8_1M_MAIN) /* V8.1-M MAINLINE.  */
    };
  const int v9[] =
    {
      T(V9),     /* PRE_V4.  */
      T(V9),     /* V4.  */
      T(V9),     /* V4T.  */
      T(V9),     /* V5T.  */
      T(V9),     /* V5TE.  */
      T(V9),     /* V5TEJ.  */
      T(V9),     /* V6.  */
      T(V9),     /* V6KZ.  */
      T(V9),     /* V6T2.  */
      T(V9),     /* V6K.  */
      T(V9),     /* V7.  */
      T(V9),     /* V6_M.  */
      T(V9),     /* V6S_M.  */
      T(V9),     /* V7E_M.  */
      T(V9),     /* V8.  */
      T(V9),     /* V8-R.  */
      T(V9),     /* V8-M.BASE.  */
      T(V9),     /* V8-M.MAIN.  */
      T(V9),     /* V8.1.  */
      T(V9),     /* V8.2.  */
      T(V9),     /* V8.3.  */
      T(V9),     /* V8.1-M.MAIN.  */
      T(V9),     /* V9.  */
    };
  const int v4t_plus_v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V4T),    /* V4T.  */
      T(V5T),    /* V5T.  */
      T(V5TE),   /* V5TE.  */
      T(V5TEJ),  /* V5TEJ.  */
      T(V6),     /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V6T2),   /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M),   /* V6_M.  */
      T(V6S_M),  /* V6S_M.  */
      T(V7E_M),  /* V7E_M.  */
      T(V8),     /* V8.  */
      -1,        /* V8R.  */
      T(V8M_BASE), /* V8-M BASELINE.  */
      T(V8M_MAIN), /* V8-M MAINLINE.  */
      -1,        /* Unused (18).  */
      -1,        /* Unused (19).  */
      -1,        /* Unused (20).  */
      T(V8_1M_MAIN), /* V8.1-M MAINLINE.  */
      T(V9),     /* V9.  */
      T(V4T_PLUS_V6_M) /* V4T plus V6_M.  */
    };
  /* Merge tables for the higher architecture, indexed by
     [tagh - T(V6T2)]; NULL entries are architectures that can never be
     the higher of the two tags.  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      v8r,
      v8m_baseline,
      v8m_mainline,
      NULL,
      NULL,
      NULL,
      v8_1m_mainline,
      v9,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */
  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */
  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */
  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
  12447. /* Query attributes object to see if integer divide instructions may be
  12448. present in an object. */
  12449. static bool
  12450. elf32_arm_attributes_accept_div (const obj_attribute *attr)
  12451. {
  12452. int arch = attr[Tag_CPU_arch].i;
  12453. int profile = attr[Tag_CPU_arch_profile].i;
  12454. switch (attr[Tag_DIV_use].i)
  12455. {
  12456. case 0:
  12457. /* Integer divide allowed if instruction contained in archetecture. */
  12458. if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
  12459. return true;
  12460. else if (arch >= TAG_CPU_ARCH_V7E_M)
  12461. return true;
  12462. else
  12463. return false;
  12464. case 1:
  12465. /* Integer divide explicitly prohibited. */
  12466. return false;
  12467. default:
  12468. /* Unrecognised case - treat as allowing divide everywhere. */
  12469. case 2:
  12470. /* Integer divide allowed in ARM state. */
  12471. return true;
  12472. }
  12473. }
  12474. /* Query attributes object to see if integer divide instructions are
  12475. forbidden to be in the object. This is not the inverse of
  12476. elf32_arm_attributes_accept_div. */
  12477. static bool
  12478. elf32_arm_attributes_forbid_div (const obj_attribute *attr)
  12479. {
  12480. return attr[Tag_DIV_use].i == 1;
  12481. }
/* Merge EABI object attributes from IBFD into the output BFD recorded in
   INFO.  Raise an error if there are conflicting attributes.  Returns
   FALSE (after reporting) on a hard conflict; some mismatches only emit
   warnings and still return TRUE.  */
static bool
elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  obj_attribute *in_attr;
  obj_attribute *out_attr;
  /* Some tags have 0 = don't care, 1 = strong requirement,
     2 = weak requirement.  */
  static const int order_021[3] = {0, 2, 1};
  int i;
  bool result = true;
  const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;

  /* Skip the linker stubs file.  This preserves previous behavior
     of accepting unknown attributes in the first input file - but
     is that a bug?  */
  if (ibfd->flags & BFD_LINKER_CREATED)
    return true;

  /* Skip any input that hasn't attribute section.
     This enables to link object files without attribute section with
     any others.  */
  if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
    return true;

  if (!elf_known_obj_attributes_proc (obfd)[0].i)
    {
      /* This is the first object.  Copy the attributes.  */
      _bfd_elf_copy_obj_attributes (ibfd, obfd);

      out_attr = elf_known_obj_attributes_proc (obfd);

      /* Use the Tag_null value to indicate the attributes have been
	 initialized.  */
      out_attr[0].i = 1;

      /* We do not output objects with Tag_MPextension_use_legacy - we move
	 the attribute's value to Tag_MPextension_use.  */
      if (out_attr[Tag_MPextension_use_legacy].i != 0)
	{
	  if (out_attr[Tag_MPextension_use].i != 0
	      && out_attr[Tag_MPextension_use_legacy].i
		!= out_attr[Tag_MPextension_use].i)
	    {
	      _bfd_error_handler
		(_("Error: %pB has both the current and legacy "
		   "Tag_MPextension_use attributes"), ibfd);
	      result = false;
	    }

	  out_attr[Tag_MPextension_use] =
	    out_attr[Tag_MPextension_use_legacy];
	  out_attr[Tag_MPextension_use_legacy].type = 0;
	  out_attr[Tag_MPextension_use_legacy].i = 0;
	}

      /* PR 28859 and 28848: Handle the case where the first input file,
	 eg crti.o, has a Tag_ABI_HardFP_use of 3 but no Tag_FP_arch set.
	 Using Tag_ABI_HardFP_use in this way is deprecated, so reset the
	 attribute to zero.
	 FIXME: Should we handle other non-zero values of Tag_ABI_HardFP_use ? */
      if (out_attr[Tag_ABI_HardFP_use].i == 3 && out_attr[Tag_FP_arch].i == 0)
	out_attr[Tag_ABI_HardFP_use].i = 0;

      return result;
    }

  in_attr = elf_known_obj_attributes_proc (ibfd);
  out_attr = elf_known_obj_attributes_proc (obfd);

  /* This needs to happen before Tag_ABI_FP_number_model is merged.  */
  if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
    {
      /* Ignore mismatches if the object doesn't use floating point or is
	 floating point ABI independent.  */
      if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
	  || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
	      && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
	out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
      else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
	       && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
	{
	  _bfd_error_handler
	    (_("error: %pB uses VFP register arguments, %pB does not"),
	     in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
	     in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
	  result = false;
	}
    }

  for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
    {
      /* Merge this attribute with existing attributes.  */
      switch (i)
	{
	case Tag_CPU_raw_name:
	case Tag_CPU_name:
	  /* These are merged after Tag_CPU_arch.  */
	  break;

	case Tag_ABI_optimization_goals:
	case Tag_ABI_FP_optimization_goals:
	  /* Use the first value seen.  */
	  break;

	case Tag_CPU_arch:
	  {
	    int secondary_compat = -1, secondary_compat_out = -1;
	    unsigned int saved_out_attr = out_attr[i].i;
	    int arch_attr;
	    /* Table indexed by TAG_CPU_ARCH_* value; used to synthesize a
	       Tag_CPU_name when the merge changed the architecture.  */
	    static const char *name_table[] =
	      {
		/* These aren't real CPU names, but we can't guess
		   that from the architecture version alone.  */
		"Pre v4",
		"ARM v4",
		"ARM v4T",
		"ARM v5T",
		"ARM v5TE",
		"ARM v5TEJ",
		"ARM v6",
		"ARM v6KZ",
		"ARM v6T2",
		"ARM v6K",
		"ARM v7",
		"ARM v6-M",
		"ARM v6S-M",
		"ARM v7E-M",
		"ARM v8",
		"ARM v8-R",
		"ARM v8-M.baseline",
		"ARM v8-M.mainline",
		"ARM v8.1-A",
		"ARM v8.2-A",
		"ARM v8.3-A",
		"ARM v8.1-M.mainline",
		"ARM v9",
	      };

	    /* Merge Tag_CPU_arch and Tag_also_compatible_with.  */
	    secondary_compat = get_secondary_compatible_arch (ibfd);
	    secondary_compat_out = get_secondary_compatible_arch (obfd);
	    arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
					      &secondary_compat_out,
					      in_attr[i].i,
					      secondary_compat);

	    /* Return with error if failed to merge.  */
	    if (arch_attr == -1)
	      return false;

	    out_attr[i].i = arch_attr;

	    set_secondary_compatible_arch (obfd, secondary_compat_out);

	    /* Merge Tag_CPU_name and Tag_CPU_raw_name.  */
	    if (out_attr[i].i == saved_out_attr)
	      ; /* Leave the names alone.  */
	    else if (out_attr[i].i == in_attr[i].i)
	      {
		/* The output architecture has been changed to match the
		   input architecture.  Use the input names.  */
		out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
		  : NULL;
		out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
		  : NULL;
	      }
	    else
	      {
		out_attr[Tag_CPU_name].s = NULL;
		out_attr[Tag_CPU_raw_name].s = NULL;
	      }

	    /* If we still don't have a value for Tag_CPU_name,
	       make one up now.  Tag_CPU_raw_name remains blank.  */
	    if (out_attr[Tag_CPU_name].s == NULL
		&& out_attr[i].i < ARRAY_SIZE (name_table))
	      out_attr[Tag_CPU_name].s =
		_bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
	  }
	  break;

	case Tag_ARM_ISA_use:
	case Tag_THUMB_ISA_use:
	case Tag_WMMX_arch:
	case Tag_Advanced_SIMD_arch:
	  /* ??? Do Advanced_SIMD (NEON) and WMMX conflict?  */
	case Tag_ABI_FP_rounding:
	case Tag_ABI_FP_exceptions:
	case Tag_ABI_FP_user_exceptions:
	case Tag_ABI_FP_number_model:
	case Tag_FP_HP_extension:
	case Tag_CPU_unaligned_access:
	case Tag_T2EE_use:
	case Tag_MPextension_use:
	case Tag_MVE_arch:
	case Tag_PAC_extension:
	case Tag_BTI_extension:
	case Tag_BTI_use:
	case Tag_PACRET_use:
	  /* Use the largest value specified.  */
	  if (in_attr[i].i > out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_align_preserved:
	case Tag_ABI_PCS_RO_data:
	  /* Use the smallest value specified.  */
	  if (in_attr[i].i < out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_align_needed:
	  if ((in_attr[i].i > 0 || out_attr[i].i > 0)
	      && (in_attr[Tag_ABI_align_preserved].i == 0
		  || out_attr[Tag_ABI_align_preserved].i == 0))
	    {
	      /* This error message should be enabled once all non-conformant
		 binaries in the toolchain have had the attributes set
		 properly.
	      _bfd_error_handler
		(_("error: %pB: 8-byte data alignment conflicts with %pB"),
		 obfd, ibfd);
	      result = false; */
	    }
	  /* Fall through.  */
	case Tag_ABI_FP_denormal:
	case Tag_ABI_PCS_GOT_use:
	  /* Use the "greatest" from the sequence 0, 2, 1, or the largest
	     value if greater than 2 (for future-proofing).  */
	  if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
	      || (in_attr[i].i <= 2 && out_attr[i].i <= 2
		  && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_Virtualization_use:
	  /* The virtualization tag effectively stores two bits of
	     information: the intended use of TrustZone (in bit 0), and the
	     intended use of Virtualization (in bit 1).  */
	  if (out_attr[i].i == 0)
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i != 0
		   && in_attr[i].i != out_attr[i].i)
	    {
	      if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
		out_attr[i].i = 3;
	      else
		{
		  _bfd_error_handler
		    (_("error: %pB: unable to merge virtualization attributes "
		       "with %pB"),
		     obfd, ibfd);
		  result = false;
		}
	    }
	  break;

	case Tag_CPU_arch_profile:
	  if (out_attr[i].i != in_attr[i].i)
	    {
	      /* 0 will merge with anything.
		 'A' and 'S' merge to 'A'.
		 'R' and 'S' merge to 'R'.
		 'M' and 'A|R|S' is an error.  */
	      if (out_attr[i].i == 0
		  || (out_attr[i].i == 'S'
		      && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
		out_attr[i].i = in_attr[i].i;
	      else if (in_attr[i].i == 0
		       || (in_attr[i].i == 'S'
			   && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
		; /* Do nothing.  */
	      else
		{
		  _bfd_error_handler
		    (_("error: %pB: conflicting architecture profiles %c/%c"),
		     ibfd,
		     in_attr[i].i ? in_attr[i].i : '0',
		     out_attr[i].i ? out_attr[i].i : '0');
		  result = false;
		}
	    }
	  break;

	case Tag_DSP_extension:
	  /* No need to change output value if any of:
	     - pre (<=) ARMv5T input architecture (do not have DSP)
	     - M input profile not ARMv7E-M and do not have DSP.  */
	  if (in_attr[Tag_CPU_arch].i <= 3
	      || (in_attr[Tag_CPU_arch_profile].i == 'M'
		  && in_attr[Tag_CPU_arch].i != 13
		  && in_attr[i].i == 0))
	    ; /* Do nothing.  */
	  /* Output value should be 0 if DSP part of architecture, ie.
	     - post (>=) ARMv5te architecture output
	     - A, R or S profile output or ARMv7E-M output architecture.  */
	  else if (out_attr[Tag_CPU_arch].i >= 4
		   && (out_attr[Tag_CPU_arch_profile].i == 'A'
		       || out_attr[Tag_CPU_arch_profile].i == 'R'
		       || out_attr[Tag_CPU_arch_profile].i == 'S'
		       || out_attr[Tag_CPU_arch].i == 13))
	    out_attr[i].i = 0;
	  /* Otherwise, DSP instructions are added and not part of output
	     architecture.  */
	  else
	    out_attr[i].i = 1;
	  break;

	case Tag_FP_arch:
	    {
	      /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
		 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
		 when it's 0.  It might mean absence of FP hardware if
		 Tag_FP_arch is zero.  */

#define VFP_VERSION_COUNT 9
	      /* Each Tag_FP_arch value maps to a (version, register count)
		 pair; merging takes the superset of both components.  */
	      static const struct
	      {
		  int ver;
		  int regs;
	      } vfp_versions[VFP_VERSION_COUNT] =
		{
		  {0, 0},
		  {1, 16},
		  {2, 16},
		  {3, 32},
		  {3, 16},
		  {4, 32},
		  {4, 16},
		  {8, 32},
		  {8, 16}
		};
	      int ver;
	      int regs;
	      int newval;

	      /* If the output has no requirement about FP hardware,
		 follow the requirement of the input.  */
	      if (out_attr[i].i == 0)
		{
		  /* This assert is still reasonable, we shouldn't
		     produce the suspicious build attribute
		     combination (See below for in_attr).  */
		  BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);

		  out_attr[i].i = in_attr[i].i;
		  out_attr[Tag_ABI_HardFP_use].i
		    = in_attr[Tag_ABI_HardFP_use].i;
		  break;
		}
	      /* If the input has no requirement about FP hardware, do
		 nothing.  */
	      else if (in_attr[i].i == 0)
		{
		  /* We used to assert that Tag_ABI_HardFP_use was
		     zero here, but we should never assert when
		     consuming an object file that has suspicious
		     build attributes.  The single precision variant
		     of 'no FP architecture' is still 'no FP
		     architecture', so we just ignore the tag in this
		     case.  */
		  break;
		}

	      /* Both the input and the output have nonzero Tag_FP_arch.
		 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero.  */

	      /* If both the input and the output have zero Tag_ABI_HardFP_use,
		 do nothing.  */
	      if (in_attr[Tag_ABI_HardFP_use].i == 0
		  && out_attr[Tag_ABI_HardFP_use].i == 0)
		;
	      /* If the input and the output have different Tag_ABI_HardFP_use,
		 the combination of them is 0 (implied by Tag_FP_arch).  */
	      else if (in_attr[Tag_ABI_HardFP_use].i
		       != out_attr[Tag_ABI_HardFP_use].i)
		out_attr[Tag_ABI_HardFP_use].i = 0;

	      /* Now we can handle Tag_FP_arch.  */

	      /* Values of VFP_VERSION_COUNT or more aren't defined, so just
		 pick the biggest.  */
	      if (in_attr[i].i >= VFP_VERSION_COUNT
		  && in_attr[i].i > out_attr[i].i)
		{
		  out_attr[i] = in_attr[i];
		  break;
		}
	      /* The output uses the superset of input features
		 (ISA version) and registers.  */
	      ver = vfp_versions[in_attr[i].i].ver;
	      if (ver < vfp_versions[out_attr[i].i].ver)
		ver = vfp_versions[out_attr[i].i].ver;
	      regs = vfp_versions[in_attr[i].i].regs;
	      if (regs < vfp_versions[out_attr[i].i].regs)
		regs = vfp_versions[out_attr[i].i].regs;
	      /* This assumes all possible supersets are also valid
		 options.  */
	      for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
		{
		  if (regs == vfp_versions[newval].regs
		      && ver == vfp_versions[newval].ver)
		    break;
		}
	      out_attr[i].i = newval;
	    }
	  break;

	case Tag_PCS_config:
	  if (out_attr[i].i == 0)
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
	    {
	      /* It's sometimes ok to mix different configs, so this is only
		 a warning.  */
	      _bfd_error_handler
		(_("warning: %pB: conflicting platform configuration"), ibfd);
	    }
	  break;

	case Tag_ABI_PCS_R9_use:
	  if (in_attr[i].i != out_attr[i].i
	      && out_attr[i].i != AEABI_R9_unused
	      && in_attr[i].i != AEABI_R9_unused)
	    {
	      _bfd_error_handler
		(_("error: %pB: conflicting use of R9"), ibfd);
	      result = false;
	    }
	  if (out_attr[i].i == AEABI_R9_unused)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_PCS_RW_data:
	  if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
	    {
	      _bfd_error_handler
		(_("error: %pB: SB relative addressing conflicts with use of R9"),
		 ibfd);
	      result = false;
	    }
	  /* Use the smallest value specified.  */
	  if (in_attr[i].i < out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_PCS_wchar_t:
	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
	    {
	      _bfd_error_handler
		(_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
		 ibfd, in_attr[i].i, out_attr[i].i);
	    }
	  else if (in_attr[i].i && !out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_enum_size:
	  if (in_attr[i].i != AEABI_enum_unused)
	    {
	      if (out_attr[i].i == AEABI_enum_unused
		  || out_attr[i].i == AEABI_enum_forced_wide)
		{
		  /* The existing object is compatible with anything.
		     Use whatever requirements the new object has.  */
		  out_attr[i].i = in_attr[i].i;
		}
	      else if (in_attr[i].i != AEABI_enum_forced_wide
		       && out_attr[i].i != in_attr[i].i
		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
		{
		  static const char *aeabi_enum_names[] =
		    { "", "variable-size", "32-bit", "" };
		  const char *in_name =
		    in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
		    ? aeabi_enum_names[in_attr[i].i]
		    : "<unknown>";
		  const char *out_name =
		    out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
		    ? aeabi_enum_names[out_attr[i].i]
		    : "<unknown>";
		  _bfd_error_handler
		    (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
		     ibfd, in_name, out_name);
		}
	    }
	  break;

	case Tag_ABI_VFP_args:
	  /* Already done before the attribute loop (must precede the
	     Tag_ABI_FP_number_model merge).  */
	  break;

	case Tag_ABI_WMMX_args:
	  if (in_attr[i].i != out_attr[i].i)
	    {
	      _bfd_error_handler
		(_("error: %pB uses iWMMXt register arguments, %pB does not"),
		 ibfd, obfd);
	      result = false;
	    }
	  break;

	case Tag_compatibility:
	  /* Merged in target-independent code.  */
	  break;

	case Tag_ABI_HardFP_use:
	  /* This is handled along with Tag_FP_arch.  */
	  break;

	case Tag_ABI_FP_16bit_format:
	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
	    {
	      if (in_attr[i].i != out_attr[i].i)
		{
		  _bfd_error_handler
		    (_("error: fp16 format mismatch between %pB and %pB"),
		     ibfd, obfd);
		  result = false;
		}
	    }
	  if (in_attr[i].i != 0)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_DIV_use:
	  /* A value of zero on input means that the divide instruction may
	     be used if available in the base architecture as specified via
	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
	     the user did not want divide instructions.  A value of 2
	     explicitly means that divide instructions were allowed in ARM
	     and Thumb state.  */
	  if (in_attr[i].i == out_attr[i].i)
	    /* Do nothing.  */ ;
	  else if (elf32_arm_attributes_forbid_div (in_attr)
		   && !elf32_arm_attributes_accept_div (out_attr))
	    out_attr[i].i = 1;
	  else if (elf32_arm_attributes_forbid_div (out_attr)
		   && elf32_arm_attributes_accept_div (in_attr))
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i == 2)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_MPextension_use_legacy:
	  /* We don't output objects with Tag_MPextension_use_legacy - we
	     move the value to Tag_MPextension_use.  */
	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
	    {
	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
		{
		  _bfd_error_handler
		    (_("%pB has both the current and legacy "
		       "Tag_MPextension_use attributes"),
		     ibfd);
		  result = false;
		}
	    }

	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
	    out_attr[Tag_MPextension_use] = in_attr[i];

	  break;

	case Tag_nodefaults:
	  /* This tag is set if it exists, but the value is unused (and is
	     typically zero).  We don't actually need to do anything here -
	     the merge happens automatically when the type flags are merged
	     below.  */
	  break;
	case Tag_also_compatible_with:
	  /* Already done in Tag_CPU_arch.  */
	  break;
	case Tag_conformance:
	  /* Keep the attribute if it matches.  Throw it away otherwise.
	     No attribute means no claim to conform.  */
	  if (!in_attr[i].s || !out_attr[i].s
	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
	    out_attr[i].s = NULL;
	  break;

	default:
	  result
	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
	}

      /* If out_attr was copied from in_attr then it won't have a type yet.  */
      if (in_attr[i].type && !out_attr[i].type)
	out_attr[i].type = in_attr[i].type;
    }

  /* Merge Tag_compatibility attributes and any common GNU ones.  */
  if (!_bfd_elf_merge_object_attributes (ibfd, info))
    return false;

  /* Check for any attributes not known on ARM.  */
  result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);

  return result;
}
/* Return TRUE if the two EABI versions IVER and OVER are compatible,
   ie. objects carrying them may safely be linked together.  (The previous
   comment said "incompatible", but the code returns TRUE on a match.)  */
static bool
elf32_arm_versions_compatible (unsigned iver, unsigned over)
{
  /* v4 and v5 are the same spec before and after it was released,
     so allow mixing them.  */
  if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
      || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
    return true;

  return (iver == over);
}
  13047. /* Merge backend specific data from an object file to the output
  13048. object file when linking. */
  13049. static bool
  13050. elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
/* Display the flags field.  Decodes the e_flags word of ABFD to the FILE
   passed in PTR, interpreting the flag bits according to the EABI version
   encoded in the flags themselves.  Always returns TRUE.  */
static bool
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);

  /* Each case clears the bits it has decoded from FLAGS so that any
     leftover bits can be reported as unrecognised at the end.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* Version 4 shares the BE8/LE8 decoding with version 5 but has no
	 float-ABI flags, so skip over that part of the VER5 case.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  if (flags)
    fprintf (file, _(" <Unrecognised flag bits set>"));

  fputc ('\n', file);

  return true;
}
  13155. static int
  13156. elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
  13157. {
  13158. switch (ELF_ST_TYPE (elf_sym->st_info))
  13159. {
  13160. case STT_ARM_TFUNC:
  13161. return ELF_ST_TYPE (elf_sym->st_info);
  13162. case STT_ARM_16BIT:
  13163. /* If the symbol is not an object, return the STT_ARM_16BIT flag.
  13164. This allows us to distinguish between data used by Thumb instructions
  13165. and non-data (which is probably code) inside Thumb regions of an
  13166. executable. */
  13167. if (type != STT_OBJECT && type != STT_TLS)
  13168. return ELF_ST_TYPE (elf_sym->st_info);
  13169. break;
  13170. default:
  13171. break;
  13172. }
  13173. return type;
  13174. }
  13175. static asection *
  13176. elf32_arm_gc_mark_hook (asection *sec,
  13177. struct bfd_link_info *info,
  13178. Elf_Internal_Rela *rel,
  13179. struct elf_link_hash_entry *h,
  13180. Elf_Internal_Sym *sym)
  13181. {
  13182. if (h != NULL)
  13183. switch (ELF32_R_TYPE (rel->r_info))
  13184. {
  13185. case R_ARM_GNU_VTINHERIT:
  13186. case R_ARM_GNU_VTENTRY:
  13187. return NULL;
  13188. }
  13189. return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
  13190. }
  13191. /* Look through the relocs for a section during the first phase. */
  13192. static bool
  13193. elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
  13194. asection *sec, const Elf_Internal_Rela *relocs)
  13195. {
  13196. Elf_Internal_Shdr *symtab_hdr;
  13197. struct elf_link_hash_entry **sym_hashes;
  13198. const Elf_Internal_Rela *rel;
  13199. const Elf_Internal_Rela *rel_end;
  13200. bfd *dynobj;
  13201. asection *sreloc;
  13202. struct elf32_arm_link_hash_table *htab;
  13203. bool call_reloc_p;
  13204. bool may_become_dynamic_p;
  13205. bool may_need_local_target_p;
  13206. unsigned long nsyms;
  13207. if (bfd_link_relocatable (info))
  13208. return true;
  13209. BFD_ASSERT (is_arm_elf (abfd));
  13210. htab = elf32_arm_hash_table (info);
  13211. if (htab == NULL)
  13212. return false;
  13213. sreloc = NULL;
  13214. /* Create dynamic sections for relocatable executables so that we can
  13215. copy relocations. */
  13216. if (htab->root.is_relocatable_executable
  13217. && ! htab->root.dynamic_sections_created)
  13218. {
  13219. if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
  13220. return false;
  13221. }
  13222. if (htab->root.dynobj == NULL)
  13223. htab->root.dynobj = abfd;
  13224. if (!create_ifunc_sections (info))
  13225. return false;
  13226. dynobj = htab->root.dynobj;
  13227. symtab_hdr = & elf_symtab_hdr (abfd);
  13228. sym_hashes = elf_sym_hashes (abfd);
  13229. nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
  13230. rel_end = relocs + sec->reloc_count;
  13231. for (rel = relocs; rel < rel_end; rel++)
  13232. {
  13233. Elf_Internal_Sym *isym;
  13234. struct elf_link_hash_entry *h;
  13235. struct elf32_arm_link_hash_entry *eh;
  13236. unsigned int r_symndx;
  13237. int r_type;
  13238. r_symndx = ELF32_R_SYM (rel->r_info);
  13239. r_type = ELF32_R_TYPE (rel->r_info);
  13240. r_type = arm_real_reloc_type (htab, r_type);
  13241. if (r_symndx >= nsyms
  13242. /* PR 9934: It is possible to have relocations that do not
  13243. refer to symbols, thus it is also possible to have an
  13244. object file containing relocations but no symbol table. */
  13245. && (r_symndx > STN_UNDEF || nsyms > 0))
  13246. {
  13247. _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
  13248. r_symndx);
  13249. return false;
  13250. }
  13251. h = NULL;
  13252. isym = NULL;
  13253. if (nsyms > 0)
  13254. {
  13255. if (r_symndx < symtab_hdr->sh_info)
  13256. {
  13257. /* A local symbol. */
  13258. isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
  13259. abfd, r_symndx);
  13260. if (isym == NULL)
  13261. return false;
  13262. }
  13263. else
  13264. {
  13265. h = sym_hashes[r_symndx - symtab_hdr->sh_info];
  13266. while (h->root.type == bfd_link_hash_indirect
  13267. || h->root.type == bfd_link_hash_warning)
  13268. h = (struct elf_link_hash_entry *) h->root.u.i.link;
  13269. }
  13270. }
  13271. eh = (struct elf32_arm_link_hash_entry *) h;
  13272. call_reloc_p = false;
  13273. may_become_dynamic_p = false;
  13274. may_need_local_target_p = false;
  13275. /* Could be done earlier, if h were already available. */
  13276. r_type = elf32_arm_tls_transition (info, r_type, h);
  13277. switch (r_type)
  13278. {
  13279. case R_ARM_GOTOFFFUNCDESC:
  13280. {
  13281. if (h == NULL)
  13282. {
  13283. if (!elf32_arm_allocate_local_sym_info (abfd))
  13284. return false;
  13285. if (r_symndx >= elf32_arm_num_entries (abfd))
  13286. return false;
  13287. elf32_arm_local_fdpic_cnts (abfd) [r_symndx].gotofffuncdesc_cnt += 1;
  13288. elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
  13289. }
  13290. else
  13291. {
  13292. eh->fdpic_cnts.gotofffuncdesc_cnt++;
  13293. }
  13294. }
  13295. break;
  13296. case R_ARM_GOTFUNCDESC:
  13297. {
  13298. if (h == NULL)
  13299. {
  13300. /* Such a relocation is not supposed to be generated
  13301. by gcc on a static function. */
  13302. /* Anyway if needed it could be handled. */
  13303. return false;
  13304. }
  13305. else
  13306. {
  13307. eh->fdpic_cnts.gotfuncdesc_cnt++;
  13308. }
  13309. }
  13310. break;
  13311. case R_ARM_FUNCDESC:
  13312. {
  13313. if (h == NULL)
  13314. {
  13315. if (!elf32_arm_allocate_local_sym_info (abfd))
  13316. return false;
  13317. if (r_symndx >= elf32_arm_num_entries (abfd))
  13318. return false;
  13319. elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_cnt += 1;
  13320. elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
  13321. }
  13322. else
  13323. {
  13324. eh->fdpic_cnts.funcdesc_cnt++;
  13325. }
  13326. }
  13327. break;
  13328. case R_ARM_GOT32:
  13329. case R_ARM_GOT_PREL:
  13330. case R_ARM_TLS_GD32:
  13331. case R_ARM_TLS_GD32_FDPIC:
  13332. case R_ARM_TLS_IE32:
  13333. case R_ARM_TLS_IE32_FDPIC:
  13334. case R_ARM_TLS_GOTDESC:
  13335. case R_ARM_TLS_DESCSEQ:
  13336. case R_ARM_THM_TLS_DESCSEQ:
  13337. case R_ARM_TLS_CALL:
  13338. case R_ARM_THM_TLS_CALL:
  13339. /* This symbol requires a global offset table entry. */
  13340. {
  13341. int tls_type, old_tls_type;
  13342. switch (r_type)
  13343. {
  13344. case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
  13345. case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
  13346. case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
  13347. case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
  13348. case R_ARM_TLS_GOTDESC:
  13349. case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
  13350. case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
  13351. tls_type = GOT_TLS_GDESC; break;
  13352. default: tls_type = GOT_NORMAL; break;
  13353. }
  13354. if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
  13355. info->flags |= DF_STATIC_TLS;
  13356. if (h != NULL)
  13357. {
  13358. h->got.refcount++;
  13359. old_tls_type = elf32_arm_hash_entry (h)->tls_type;
  13360. }
  13361. else
  13362. {
  13363. /* This is a global offset table entry for a local symbol. */
  13364. if (!elf32_arm_allocate_local_sym_info (abfd))
  13365. return false;
  13366. if (r_symndx >= elf32_arm_num_entries (abfd))
  13367. {
  13368. _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
  13369. r_symndx);
  13370. return false;
  13371. }
  13372. elf_local_got_refcounts (abfd)[r_symndx] += 1;
  13373. old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
  13374. }
  13375. /* If a variable is accessed with both tls methods, two
  13376. slots may be created. */
  13377. if (GOT_TLS_GD_ANY_P (old_tls_type)
  13378. && GOT_TLS_GD_ANY_P (tls_type))
  13379. tls_type |= old_tls_type;
  13380. /* We will already have issued an error message if there
  13381. is a TLS/non-TLS mismatch, based on the symbol
  13382. type. So just combine any TLS types needed. */
  13383. if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
  13384. && tls_type != GOT_NORMAL)
  13385. tls_type |= old_tls_type;
  13386. /* If the symbol is accessed in both IE and GDESC
  13387. method, we're able to relax. Turn off the GDESC flag,
  13388. without messing up with any other kind of tls types
  13389. that may be involved. */
  13390. if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
  13391. tls_type &= ~GOT_TLS_GDESC;
  13392. if (old_tls_type != tls_type)
  13393. {
  13394. if (h != NULL)
  13395. elf32_arm_hash_entry (h)->tls_type = tls_type;
  13396. else
  13397. elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
  13398. }
  13399. }
  13400. /* Fall through. */
  13401. case R_ARM_TLS_LDM32:
  13402. case R_ARM_TLS_LDM32_FDPIC:
  13403. if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
  13404. htab->tls_ldm_got.refcount++;
  13405. /* Fall through. */
  13406. case R_ARM_GOTOFF32:
  13407. case R_ARM_GOTPC:
  13408. if (htab->root.sgot == NULL
  13409. && !create_got_section (htab->root.dynobj, info))
  13410. return false;
  13411. break;
  13412. case R_ARM_PC24:
  13413. case R_ARM_PLT32:
  13414. case R_ARM_CALL:
  13415. case R_ARM_JUMP24:
  13416. case R_ARM_PREL31:
  13417. case R_ARM_THM_CALL:
  13418. case R_ARM_THM_JUMP24:
  13419. case R_ARM_THM_JUMP19:
  13420. call_reloc_p = true;
  13421. may_need_local_target_p = true;
  13422. break;
  13423. case R_ARM_ABS12:
  13424. /* VxWorks uses dynamic R_ARM_ABS12 relocations for
  13425. ldr __GOTT_INDEX__ offsets. */
  13426. if (htab->root.target_os != is_vxworks)
  13427. {
  13428. may_need_local_target_p = true;
  13429. break;
  13430. }
  13431. else goto jump_over;
  13432. /* Fall through. */
  13433. case R_ARM_MOVW_ABS_NC:
  13434. case R_ARM_MOVT_ABS:
  13435. case R_ARM_THM_MOVW_ABS_NC:
  13436. case R_ARM_THM_MOVT_ABS:
  13437. if (bfd_link_pic (info))
  13438. {
  13439. _bfd_error_handler
  13440. (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
  13441. abfd, elf32_arm_howto_table_1[r_type].name,
  13442. (h) ? h->root.root.string : "a local symbol");
  13443. bfd_set_error (bfd_error_bad_value);
  13444. return false;
  13445. }
  13446. /* Fall through. */
  13447. case R_ARM_ABS32:
  13448. case R_ARM_ABS32_NOI:
  13449. jump_over:
  13450. if (h != NULL && bfd_link_executable (info))
  13451. {
  13452. h->pointer_equality_needed = 1;
  13453. }
  13454. /* Fall through. */
  13455. case R_ARM_REL32:
  13456. case R_ARM_REL32_NOI:
  13457. case R_ARM_MOVW_PREL_NC:
  13458. case R_ARM_MOVT_PREL:
  13459. case R_ARM_THM_MOVW_PREL_NC:
  13460. case R_ARM_THM_MOVT_PREL:
  13461. /* Should the interworking branches be listed here? */
  13462. if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
  13463. || htab->fdpic_p)
  13464. && (sec->flags & SEC_ALLOC) != 0)
  13465. {
  13466. if (h == NULL
  13467. && elf32_arm_howto_from_type (r_type)->pc_relative)
  13468. {
  13469. /* In shared libraries and relocatable executables,
  13470. we treat local relative references as calls;
  13471. see the related SYMBOL_CALLS_LOCAL code in
  13472. allocate_dynrelocs. */
  13473. call_reloc_p = true;
  13474. may_need_local_target_p = true;
  13475. }
  13476. else
  13477. /* We are creating a shared library or relocatable
  13478. executable, and this is a reloc against a global symbol,
  13479. or a non-PC-relative reloc against a local symbol.
  13480. We may need to copy the reloc into the output. */
  13481. may_become_dynamic_p = true;
  13482. }
  13483. else
  13484. may_need_local_target_p = true;
  13485. break;
  13486. /* This relocation describes the C++ object vtable hierarchy.
  13487. Reconstruct it for later use during GC. */
  13488. case R_ARM_GNU_VTINHERIT:
  13489. if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
  13490. return false;
  13491. break;
  13492. /* This relocation describes which C++ vtable entries are actually
  13493. used. Record for later use during GC. */
  13494. case R_ARM_GNU_VTENTRY:
  13495. if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
  13496. return false;
  13497. break;
  13498. }
  13499. if (h != NULL)
  13500. {
  13501. if (call_reloc_p)
  13502. /* We may need a .plt entry if the function this reloc
  13503. refers to is in a different object, regardless of the
  13504. symbol's type. We can't tell for sure yet, because
  13505. something later might force the symbol local. */
  13506. h->needs_plt = 1;
  13507. else if (may_need_local_target_p)
  13508. /* If this reloc is in a read-only section, we might
  13509. need a copy reloc. We can't check reliably at this
  13510. stage whether the section is read-only, as input
  13511. sections have not yet been mapped to output sections.
  13512. Tentatively set the flag for now, and correct in
  13513. adjust_dynamic_symbol. */
  13514. h->non_got_ref = 1;
  13515. }
  13516. if (may_need_local_target_p
  13517. && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
  13518. {
  13519. union gotplt_union *root_plt;
  13520. struct arm_plt_info *arm_plt;
  13521. struct arm_local_iplt_info *local_iplt;
  13522. if (h != NULL)
  13523. {
  13524. root_plt = &h->plt;
  13525. arm_plt = &eh->plt;
  13526. }
  13527. else
  13528. {
  13529. local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
  13530. if (local_iplt == NULL)
  13531. return false;
  13532. root_plt = &local_iplt->root;
  13533. arm_plt = &local_iplt->arm;
  13534. }
  13535. /* If the symbol is a function that doesn't bind locally,
  13536. this relocation will need a PLT entry. */
  13537. if (root_plt->refcount != -1)
  13538. root_plt->refcount += 1;
  13539. if (!call_reloc_p)
  13540. arm_plt->noncall_refcount++;
  13541. /* It's too early to use htab->use_blx here, so we have to
  13542. record possible blx references separately from
  13543. relocs that definitely need a thumb stub. */
  13544. if (r_type == R_ARM_THM_CALL)
  13545. arm_plt->maybe_thumb_refcount += 1;
  13546. if (r_type == R_ARM_THM_JUMP24
  13547. || r_type == R_ARM_THM_JUMP19)
  13548. arm_plt->thumb_refcount += 1;
  13549. }
  13550. if (may_become_dynamic_p)
  13551. {
  13552. struct elf_dyn_relocs *p, **head;
  13553. /* Create a reloc section in dynobj. */
  13554. if (sreloc == NULL)
  13555. {
  13556. sreloc = _bfd_elf_make_dynamic_reloc_section
  13557. (sec, dynobj, 2, abfd, ! htab->use_rel);
  13558. if (sreloc == NULL)
  13559. return false;
  13560. }
  13561. /* If this is a global symbol, count the number of
  13562. relocations we need for this symbol. */
  13563. if (h != NULL)
  13564. head = &h->dyn_relocs;
  13565. else
  13566. {
  13567. head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
  13568. if (head == NULL)
  13569. return false;
  13570. }
  13571. p = *head;
  13572. if (p == NULL || p->sec != sec)
  13573. {
  13574. size_t amt = sizeof *p;
  13575. p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
  13576. if (p == NULL)
  13577. return false;
  13578. p->next = *head;
  13579. *head = p;
  13580. p->sec = sec;
  13581. p->count = 0;
  13582. p->pc_count = 0;
  13583. }
  13584. if (elf32_arm_howto_from_type (r_type)->pc_relative)
  13585. p->pc_count += 1;
  13586. p->count += 1;
  13587. if (h == NULL && htab->fdpic_p && !bfd_link_pic (info)
  13588. && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI)
  13589. {
  13590. /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
  13591. that will become rofixup. */
  13592. /* This is due to the fact that we suppose all will become rofixup. */
  13593. _bfd_error_handler
  13594. (_("FDPIC does not yet support %s relocation"
  13595. " to become dynamic for executable"),
  13596. elf32_arm_howto_table_1[r_type].name);
  13597. abort ();
  13598. }
  13599. }
  13600. }
  13601. return true;
  13602. }
  13603. static void
  13604. elf32_arm_update_relocs (asection *o,
  13605. struct bfd_elf_section_reloc_data *reldata)
  13606. {
  13607. void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  13608. void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  13609. const struct elf_backend_data *bed;
  13610. _arm_elf_section_data *eado;
  13611. struct bfd_link_order *p;
  13612. bfd_byte *erela_head, *erela;
  13613. Elf_Internal_Rela *irela_head, *irela;
  13614. Elf_Internal_Shdr *rel_hdr;
  13615. bfd *abfd;
  13616. unsigned int count;
  13617. eado = get_arm_elf_section_data (o);
  13618. if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
  13619. return;
  13620. abfd = o->owner;
  13621. bed = get_elf_backend_data (abfd);
  13622. rel_hdr = reldata->hdr;
  13623. if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
  13624. {
  13625. swap_in = bed->s->swap_reloc_in;
  13626. swap_out = bed->s->swap_reloc_out;
  13627. }
  13628. else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
  13629. {
  13630. swap_in = bed->s->swap_reloca_in;
  13631. swap_out = bed->s->swap_reloca_out;
  13632. }
  13633. else
  13634. abort ();
  13635. erela_head = rel_hdr->contents;
  13636. irela_head = (Elf_Internal_Rela *) bfd_zmalloc
  13637. ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
  13638. erela = erela_head;
  13639. irela = irela_head;
  13640. count = 0;
  13641. for (p = o->map_head.link_order; p; p = p->next)
  13642. {
  13643. if (p->type == bfd_section_reloc_link_order
  13644. || p->type == bfd_symbol_reloc_link_order)
  13645. {
  13646. (*swap_in) (abfd, erela, irela);
  13647. erela += rel_hdr->sh_entsize;
  13648. irela++;
  13649. count++;
  13650. }
  13651. else if (p->type == bfd_indirect_link_order)
  13652. {
  13653. struct bfd_elf_section_reloc_data *input_reldata;
  13654. arm_unwind_table_edit *edit_list, *edit_tail;
  13655. _arm_elf_section_data *eadi;
  13656. bfd_size_type j;
  13657. bfd_vma offset;
  13658. asection *i;
  13659. i = p->u.indirect.section;
  13660. eadi = get_arm_elf_section_data (i);
  13661. edit_list = eadi->u.exidx.unwind_edit_list;
  13662. edit_tail = eadi->u.exidx.unwind_edit_tail;
  13663. offset = i->output_offset;
  13664. if (eadi->elf.rel.hdr &&
  13665. eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
  13666. input_reldata = &eadi->elf.rel;
  13667. else if (eadi->elf.rela.hdr &&
  13668. eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
  13669. input_reldata = &eadi->elf.rela;
  13670. else
  13671. abort ();
  13672. if (edit_list)
  13673. {
  13674. for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
  13675. {
  13676. arm_unwind_table_edit *edit_node, *edit_next;
  13677. bfd_vma bias;
  13678. bfd_vma reloc_index;
  13679. (*swap_in) (abfd, erela, irela);
  13680. reloc_index = (irela->r_offset - offset) / 8;
  13681. bias = 0;
  13682. edit_node = edit_list;
  13683. for (edit_next = edit_list;
  13684. edit_next && edit_next->index <= reloc_index;
  13685. edit_next = edit_node->next)
  13686. {
  13687. bias++;
  13688. edit_node = edit_next;
  13689. }
  13690. if (edit_node->type != DELETE_EXIDX_ENTRY
  13691. || edit_node->index != reloc_index)
  13692. {
  13693. irela->r_offset -= bias * 8;
  13694. irela++;
  13695. count++;
  13696. }
  13697. erela += rel_hdr->sh_entsize;
  13698. }
  13699. if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
  13700. {
  13701. /* New relocation entity. */
  13702. asection *text_sec = edit_tail->linked_section;
  13703. asection *text_out = text_sec->output_section;
  13704. bfd_vma exidx_offset = offset + i->size - 8;
  13705. irela->r_addend = 0;
  13706. irela->r_offset = exidx_offset;
  13707. irela->r_info = ELF32_R_INFO
  13708. (text_out->target_index, R_ARM_PREL31);
  13709. irela++;
  13710. count++;
  13711. }
  13712. }
  13713. else
  13714. {
  13715. for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
  13716. {
  13717. (*swap_in) (abfd, erela, irela);
  13718. erela += rel_hdr->sh_entsize;
  13719. irela++;
  13720. }
  13721. count += NUM_SHDR_ENTRIES (input_reldata->hdr);
  13722. }
  13723. }
  13724. }
  13725. reldata->count = count;
  13726. rel_hdr->sh_size = count * rel_hdr->sh_entsize;
  13727. erela = erela_head;
  13728. irela = irela_head;
  13729. while (count > 0)
  13730. {
  13731. (*swap_out) (abfd, irela, erela);
  13732. erela += rel_hdr->sh_entsize;
  13733. irela++;
  13734. count--;
  13735. }
  13736. free (irela_head);
  13737. /* Hashes are no longer valid. */
  13738. free (reldata->hashes);
  13739. reldata->hashes = NULL;
  13740. }
/* Unwinding tables are not referenced directly.  This pass marks them as
   required if the corresponding code section is marked.  Similarly, ARMv8-M
   secure entry functions can only be referenced by SG veneers which are
   created after the GC process.  They need to be marked in case they reside in
   their own section (as would be the case if code was compiled with
   -ffunction-sections).  Returns false on marking failure, true
   otherwise.  */

static bool
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  asection *cmse_sec;
  obj_attribute *out_attr;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, sym_count, ext_start;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry **sym_hashes;
  struct elf32_arm_link_hash_entry *cmse_hash;
  bool again, is_v8m, first_bfd_browse = true;
  bool debug_sec_need_to_be_marked = false;
  asection *isec;

  /* Run the generic ELF extra-section marking pass first.  */
  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  /* CMSE entry-symbol scanning only applies to v8-M M-profile
     outputs.  */
  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	   && out_attr[Tag_CPU_arch_profile].i == 'M';

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = true;
  while (again)
    {
      again = false;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (! is_arm_elf (sub))
	    continue;

	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      hdr = &elf_section_data (o)->this_hdr;
	      /* An unmarked .ARM.exidx section must be kept when the
		 text section it describes (via sh_link) is marked.
		 Marking it may in turn mark more code, so request
		 another pass.  */
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  again = true;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return false;
		}
	    }

	  /* Mark section holding ARMv8-M secure entry functions.  We mark all
	     of them so no need for a second browsing.  */
	  if (is_v8m && first_bfd_browse)
	    {
	      sym_hashes = elf_sym_hashes (sub);
	      bed = get_elf_backend_data (sub);
	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
	      ext_start = symtab_hdr->sh_info;

	      /* Scan symbols.  Only global symbols (index >= sh_info)
		 have hash-table entries; sym_hashes is indexed from
		 the first global.  */
	      for (i = ext_start; i < sym_count; i++)
		{
		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);

		  /* Assume it is a special symbol.  If not, cmse_scan will
		     warn about it and user can do something about it.  */
		  if (startswith (cmse_hash->root.root.root.string,
				  CMSE_PREFIX))
		    {
		      cmse_sec = cmse_hash->root.root.u.def.section;
		      if (!cmse_sec->gc_mark
			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
			return false;
		      /* The debug sections related to these secure entry
			 functions are marked on enabling below flag.  */
		      debug_sec_need_to_be_marked = true;
		    }
		}

	      if (debug_sec_need_to_be_marked)
		{
		  /* Looping over all the sections of the object file containing
		     Armv8-M secure entry functions and marking all the debug
		     sections.  */
		  for (isec = sub->sections; isec != NULL; isec = isec->next)
		    {
		      /* If not a debug sections, skip it.  */
		      if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
			isec->gc_mark = 1 ;
		    }
		  /* Reset for the next input bfd.  */
		  debug_sec_need_to_be_marked = false;
		}
	    }
	}
      first_bfd_browse = false;
    }

  return true;
}
  13840. /* Treat mapping symbols as special target symbols. */
  13841. static bool
  13842. elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
  13843. {
  13844. return bfd_is_arm_special_symbol_name (sym->name,
  13845. BFD_ARM_SPECIAL_SYM_TYPE_ANY);
  13846. }
  13847. /* If the ELF symbol SYM might be a function in SEC, return the
  13848. function size and set *CODE_OFF to the function's entry point,
  13849. otherwise return zero. */
  13850. static bfd_size_type
  13851. elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
  13852. bfd_vma *code_off)
  13853. {
  13854. bfd_size_type size;
  13855. elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
  13856. if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
  13857. | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
  13858. || sym->section != sec)
  13859. return 0;
  13860. size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
  13861. if (!(sym->flags & BSF_SYNTHETIC))
  13862. switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
  13863. {
  13864. case STT_NOTYPE:
  13865. /* Ignore symbols created by the annobin plugin for gcc and clang.
  13866. These symbols are hidden, local, notype and have a size of 0. */
  13867. if (size == 0
  13868. && sym->flags & BSF_LOCAL
  13869. && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
  13870. return 0;
  13871. /* Fall through. */
  13872. case STT_FUNC:
  13873. case STT_ARM_TFUNC:
  13874. /* FIXME: Allow STT_GNU_IFUNC as well ? */
  13875. break;
  13876. default:
  13877. return 0;
  13878. }
  13879. if ((sym->flags & BSF_LOCAL)
  13880. && bfd_is_arm_special_symbol_name (sym->name,
  13881. BFD_ARM_SPECIAL_SYM_TYPE_ANY))
  13882. return 0;
  13883. *code_off = sym->value;
  13884. /* Do not return 0 for the function's size. */
  13885. return size ? size : 1;
  13886. }
  13887. static bool
  13888. elf32_arm_find_inliner_info (bfd * abfd,
  13889. const char ** filename_ptr,
  13890. const char ** functionname_ptr,
  13891. unsigned int * line_ptr)
  13892. {
  13893. bool found;
  13894. found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
  13895. functionname_ptr, line_ptr,
  13896. & elf_tdata (abfd)->dwarf2_find_line_info);
  13897. return found;
  13898. }
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  */

static bool
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection *s, *srel;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return false;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->is_weakalias
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  /* Also reset the ARM-specific Thumb/non-call reference
	     counts so no stubs get allocated for this symbol.  */
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      return true;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      /* Alias the weak symbol to the real definition.  */
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      return true;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return true;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return true;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */

  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */

  /* Read-only definitions go in the relro copy section, writable ones
     in .dynbss; pick the matching reloc section as well.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      s = globals->root.sdynrelro;
      srel = globals->root.sreldynrelro;
    }
  else
    {
      s = globals->root.sdynbss;
      srel = globals->root.srelbss;
    }
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      /* Reserve one dynamic reloc slot for the R_ARM_COPY.  */
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
  14020. /* Allocate space in .plt, .got and associated reloc sections for
  14021. dynamic relocs. */
  14022. static bool
  14023. allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
  14024. {
  14025. struct bfd_link_info *info;
  14026. struct elf32_arm_link_hash_table *htab;
  14027. struct elf32_arm_link_hash_entry *eh;
  14028. struct elf_dyn_relocs *p;
  14029. if (h->root.type == bfd_link_hash_indirect)
  14030. return true;
  14031. eh = (struct elf32_arm_link_hash_entry *) h;
  14032. info = (struct bfd_link_info *) inf;
  14033. htab = elf32_arm_hash_table (info);
  14034. if (htab == NULL)
  14035. return false;
  14036. if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
  14037. && h->plt.refcount > 0)
  14038. {
  14039. /* Make sure this symbol is output as a dynamic symbol.
  14040. Undefined weak syms won't yet be marked as dynamic. */
  14041. if (h->dynindx == -1 && !h->forced_local
  14042. && h->root.type == bfd_link_hash_undefweak)
  14043. {
  14044. if (! bfd_elf_link_record_dynamic_symbol (info, h))
  14045. return false;
  14046. }
  14047. /* If the call in the PLT entry binds locally, the associated
  14048. GOT entry should use an R_ARM_IRELATIVE relocation instead of
  14049. the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
  14050. than the .plt section. */
  14051. if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
  14052. {
  14053. eh->is_iplt = 1;
  14054. if (eh->plt.noncall_refcount == 0
  14055. && SYMBOL_REFERENCES_LOCAL (info, h))
  14056. /* All non-call references can be resolved directly.
  14057. This means that they can (and in some cases, must)
  14058. resolve directly to the run-time target, rather than
  14059. to the PLT. That in turns means that any .got entry
  14060. would be equal to the .igot.plt entry, so there's
  14061. no point having both. */
  14062. h->got.refcount = 0;
  14063. }
  14064. if (bfd_link_pic (info)
  14065. || eh->is_iplt
  14066. || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
  14067. {
  14068. elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
  14069. /* If this symbol is not defined in a regular file, and we are
  14070. not generating a shared library, then set the symbol to this
  14071. location in the .plt. This is required to make function
  14072. pointers compare as equal between the normal executable and
  14073. the shared library. */
  14074. if (! bfd_link_pic (info)
  14075. && !h->def_regular)
  14076. {
  14077. h->root.u.def.section = htab->root.splt;
  14078. h->root.u.def.value = h->plt.offset;
  14079. /* Make sure the function is not marked as Thumb, in case
  14080. it is the target of an ABS32 relocation, which will
  14081. point to the PLT entry. */
  14082. ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
  14083. }
  14084. /* VxWorks executables have a second set of relocations for
  14085. each PLT entry. They go in a separate relocation section,
  14086. which is processed by the kernel loader. */
  14087. if (htab->root.target_os == is_vxworks && !bfd_link_pic (info))
  14088. {
  14089. /* There is a relocation for the initial PLT entry:
  14090. an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
  14091. if (h->plt.offset == htab->plt_header_size)
  14092. elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
  14093. /* There are two extra relocations for each subsequent
  14094. PLT entry: an R_ARM_32 relocation for the GOT entry,
  14095. and an R_ARM_32 relocation for the PLT entry. */
  14096. elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
  14097. }
  14098. }
  14099. else
  14100. {
  14101. h->plt.offset = (bfd_vma) -1;
  14102. h->needs_plt = 0;
  14103. }
  14104. }
  14105. else
  14106. {
  14107. h->plt.offset = (bfd_vma) -1;
  14108. h->needs_plt = 0;
  14109. }
  14110. eh = (struct elf32_arm_link_hash_entry *) h;
  14111. eh->tlsdesc_got = (bfd_vma) -1;
  14112. if (h->got.refcount > 0)
  14113. {
  14114. asection *s;
  14115. bool dyn;
  14116. int tls_type = elf32_arm_hash_entry (h)->tls_type;
  14117. int indx;
  14118. /* Make sure this symbol is output as a dynamic symbol.
  14119. Undefined weak syms won't yet be marked as dynamic. */
  14120. if (htab->root.dynamic_sections_created
  14121. && h->dynindx == -1
  14122. && !h->forced_local
  14123. && h->root.type == bfd_link_hash_undefweak)
  14124. {
  14125. if (! bfd_elf_link_record_dynamic_symbol (info, h))
  14126. return false;
  14127. }
  14128. s = htab->root.sgot;
  14129. h->got.offset = s->size;
  14130. if (tls_type == GOT_UNKNOWN)
  14131. abort ();
  14132. if (tls_type == GOT_NORMAL)
  14133. /* Non-TLS symbols need one GOT slot. */
  14134. s->size += 4;
  14135. else
  14136. {
  14137. if (tls_type & GOT_TLS_GDESC)
  14138. {
  14139. /* R_ARM_TLS_DESC needs 2 GOT slots. */
  14140. eh->tlsdesc_got
  14141. = (htab->root.sgotplt->size
  14142. - elf32_arm_compute_jump_table_size (htab));
  14143. htab->root.sgotplt->size += 8;
  14144. h->got.offset = (bfd_vma) -2;
  14145. /* plt.got_offset needs to know there's a TLS_DESC
  14146. reloc in the middle of .got.plt. */
  14147. htab->num_tls_desc++;
  14148. }
  14149. if (tls_type & GOT_TLS_GD)
  14150. {
  14151. /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
  14152. consecutive GOT slots. If the symbol is both GD
  14153. and GDESC, got.offset may have been
  14154. overwritten. */
  14155. h->got.offset = s->size;
  14156. s->size += 8;
  14157. }
  14158. if (tls_type & GOT_TLS_IE)
  14159. /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
  14160. slot. */
  14161. s->size += 4;
  14162. }
  14163. dyn = htab->root.dynamic_sections_created;
  14164. indx = 0;
  14165. if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
  14166. && (!bfd_link_pic (info)
  14167. || !SYMBOL_REFERENCES_LOCAL (info, h)))
  14168. indx = h->dynindx;
  14169. if (tls_type != GOT_NORMAL
  14170. && (bfd_link_dll (info) || indx != 0)
  14171. && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
  14172. || h->root.type != bfd_link_hash_undefweak))
  14173. {
  14174. if (tls_type & GOT_TLS_IE)
  14175. elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
  14176. if (tls_type & GOT_TLS_GD)
  14177. elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
  14178. if (tls_type & GOT_TLS_GDESC)
  14179. {
  14180. elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
  14181. /* GDESC needs a trampoline to jump to. */
  14182. htab->tls_trampoline = -1;
  14183. }
  14184. /* Only GD needs it. GDESC just emits one relocation per
  14185. 2 entries. */
  14186. if ((tls_type & GOT_TLS_GD) && indx != 0)
  14187. elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
  14188. }
  14189. else if (((indx != -1) || htab->fdpic_p)
  14190. && !SYMBOL_REFERENCES_LOCAL (info, h))
  14191. {
  14192. if (htab->root.dynamic_sections_created)
  14193. /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
  14194. elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
  14195. }
  14196. else if (h->type == STT_GNU_IFUNC
  14197. && eh->plt.noncall_refcount == 0)
  14198. /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
  14199. they all resolve dynamically instead. Reserve room for the
  14200. GOT entry's R_ARM_IRELATIVE relocation. */
  14201. elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
  14202. else if (bfd_link_pic (info)
  14203. && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
  14204. /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
  14205. elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
  14206. else if (htab->fdpic_p && tls_type == GOT_NORMAL)
  14207. /* Reserve room for rofixup for FDPIC executable. */
  14208. /* TLS relocs do not need space since they are completely
  14209. resolved. */
  14210. htab->srofixup->size += 4;
  14211. }
  14212. else
  14213. h->got.offset = (bfd_vma) -1;
  14214. /* FDPIC support. */
  14215. if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
  14216. {
  14217. /* Symbol musn't be exported. */
  14218. if (h->dynindx != -1)
  14219. abort ();
  14220. /* We only allocate one function descriptor with its associated
  14221. relocation. */
  14222. if (eh->fdpic_cnts.funcdesc_offset == -1)
  14223. {
  14224. asection *s = htab->root.sgot;
  14225. eh->fdpic_cnts.funcdesc_offset = s->size;
  14226. s->size += 8;
  14227. /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
  14228. if (bfd_link_pic (info))
  14229. elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
  14230. else
  14231. htab->srofixup->size += 8;
  14232. }
  14233. }
  14234. if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
  14235. {
  14236. asection *s = htab->root.sgot;
  14237. if (htab->root.dynamic_sections_created && h->dynindx == -1
  14238. && !h->forced_local)
  14239. if (! bfd_elf_link_record_dynamic_symbol (info, h))
  14240. return false;
  14241. if (h->dynindx == -1)
  14242. {
  14243. /* We only allocate one function descriptor with its
  14244. associated relocation. */
  14245. if (eh->fdpic_cnts.funcdesc_offset == -1)
  14246. {
  14247. eh->fdpic_cnts.funcdesc_offset = s->size;
  14248. s->size += 8;
  14249. /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
  14250. rofixups. */
  14251. if (bfd_link_pic (info))
  14252. elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
  14253. else
  14254. htab->srofixup->size += 8;
  14255. }
  14256. }
  14257. /* Add one entry into the GOT and a R_ARM_FUNCDESC or
  14258. R_ARM_RELATIVE/rofixup relocation on it. */
  14259. eh->fdpic_cnts.gotfuncdesc_offset = s->size;
  14260. s->size += 4;
  14261. if (h->dynindx == -1 && !bfd_link_pic (info))
  14262. htab->srofixup->size += 4;
  14263. else
  14264. elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
  14265. }
  14266. if (eh->fdpic_cnts.funcdesc_cnt > 0)
  14267. {
  14268. if (htab->root.dynamic_sections_created && h->dynindx == -1
  14269. && !h->forced_local)
  14270. if (! bfd_elf_link_record_dynamic_symbol (info, h))
  14271. return false;
  14272. if (h->dynindx == -1)
  14273. {
  14274. /* We only allocate one function descriptor with its
  14275. associated relocation. */
  14276. if (eh->fdpic_cnts.funcdesc_offset == -1)
  14277. {
  14278. asection *s = htab->root.sgot;
  14279. eh->fdpic_cnts.funcdesc_offset = s->size;
  14280. s->size += 8;
  14281. /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
  14282. rofixups. */
  14283. if (bfd_link_pic (info))
  14284. elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
  14285. else
  14286. htab->srofixup->size += 8;
  14287. }
  14288. }
  14289. if (h->dynindx == -1 && !bfd_link_pic (info))
  14290. {
  14291. /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
  14292. htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
  14293. }
  14294. else
  14295. {
  14296. /* Will need one dynamic reloc per reference. will be either
  14297. R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
  14298. elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
  14299. eh->fdpic_cnts.funcdesc_cnt);
  14300. }
  14301. }
  14302. /* Allocate stubs for exported Thumb functions on v4t. */
  14303. if (!htab->use_blx && h->dynindx != -1
  14304. && h->def_regular
  14305. && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
  14306. && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
  14307. {
  14308. struct elf_link_hash_entry * th;
  14309. struct bfd_link_hash_entry * bh;
  14310. struct elf_link_hash_entry * myh;
  14311. char name[1024];
  14312. asection *s;
  14313. bh = NULL;
  14314. /* Create a new symbol to regist the real location of the function. */
  14315. s = h->root.u.def.section;
  14316. sprintf (name, "__real_%s", h->root.root.string);
  14317. _bfd_generic_link_add_one_symbol (info, s->owner,
  14318. name, BSF_GLOBAL, s,
  14319. h->root.u.def.value,
  14320. NULL, true, false, &bh);
  14321. myh = (struct elf_link_hash_entry *) bh;
  14322. myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  14323. myh->forced_local = 1;
  14324. ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
  14325. eh->export_glue = myh;
  14326. th = record_arm_to_thumb_glue (info, h);
  14327. /* Point the symbol at the stub. */
  14328. h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
  14329. ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
  14330. h->root.u.def.section = th->root.u.def.section;
  14331. h->root.u.def.value = th->root.u.def.value & ~1;
  14332. }
  14333. if (h->dyn_relocs == NULL)
  14334. return true;
  14335. /* In the shared -Bsymbolic case, discard space allocated for
  14336. dynamic pc-relative relocs against symbols which turn out to be
  14337. defined in regular objects. For the normal shared case, discard
  14338. space for pc-relative relocs that have become local due to symbol
  14339. visibility changes. */
  14340. if (bfd_link_pic (info)
  14341. || htab->root.is_relocatable_executable
  14342. || htab->fdpic_p)
  14343. {
  14344. /* Relocs that use pc_count are PC-relative forms, which will appear
  14345. on something like ".long foo - ." or "movw REG, foo - .". We want
  14346. calls to protected symbols to resolve directly to the function
  14347. rather than going via the plt. If people want function pointer
  14348. comparisons to work as expected then they should avoid writing
  14349. assembly like ".long foo - .". */
  14350. if (SYMBOL_CALLS_LOCAL (info, h))
  14351. {
  14352. struct elf_dyn_relocs **pp;
  14353. for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
  14354. {
  14355. p->count -= p->pc_count;
  14356. p->pc_count = 0;
  14357. if (p->count == 0)
  14358. *pp = p->next;
  14359. else
  14360. pp = &p->next;
  14361. }
  14362. }
  14363. if (htab->root.target_os == is_vxworks)
  14364. {
  14365. struct elf_dyn_relocs **pp;
  14366. for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
  14367. {
  14368. if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
  14369. *pp = p->next;
  14370. else
  14371. pp = &p->next;
  14372. }
  14373. }
  14374. /* Also discard relocs on undefined weak syms with non-default
  14375. visibility. */
  14376. if (h->dyn_relocs != NULL
  14377. && h->root.type == bfd_link_hash_undefweak)
  14378. {
  14379. if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
  14380. || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
  14381. h->dyn_relocs = NULL;
  14382. /* Make sure undefined weak symbols are output as a dynamic
  14383. symbol in PIEs. */
  14384. else if (htab->root.dynamic_sections_created && h->dynindx == -1
  14385. && !h->forced_local)
  14386. {
  14387. if (! bfd_elf_link_record_dynamic_symbol (info, h))
  14388. return false;
  14389. }
  14390. }
  14391. else if (htab->root.is_relocatable_executable && h->dynindx == -1
  14392. && h->root.type == bfd_link_hash_new)
  14393. {
  14394. /* Output absolute symbols so that we can create relocations
  14395. against them. For normal symbols we output a relocation
  14396. against the section that contains them. */
  14397. if (! bfd_elf_link_record_dynamic_symbol (info, h))
  14398. return false;
  14399. }
  14400. }
  14401. else
  14402. {
  14403. /* For the non-shared case, discard space for relocs against
  14404. symbols which turn out to need copy relocs or are not
  14405. dynamic. */
  14406. if (!h->non_got_ref
  14407. && ((h->def_dynamic
  14408. && !h->def_regular)
  14409. || (htab->root.dynamic_sections_created
  14410. && (h->root.type == bfd_link_hash_undefweak
  14411. || h->root.type == bfd_link_hash_undefined))))
  14412. {
  14413. /* Make sure this symbol is output as a dynamic symbol.
  14414. Undefined weak syms won't yet be marked as dynamic. */
  14415. if (h->dynindx == -1 && !h->forced_local
  14416. && h->root.type == bfd_link_hash_undefweak)
  14417. {
  14418. if (! bfd_elf_link_record_dynamic_symbol (info, h))
  14419. return false;
  14420. }
  14421. /* If that succeeded, we know we'll be keeping all the
  14422. relocs. */
  14423. if (h->dynindx != -1)
  14424. goto keep;
  14425. }
  14426. h->dyn_relocs = NULL;
  14427. keep: ;
  14428. }
  14429. /* Finally, allocate space. */
  14430. for (p = h->dyn_relocs; p != NULL; p = p->next)
  14431. {
  14432. asection *sreloc = elf_section_data (p->sec)->sreloc;
  14433. if (h->type == STT_GNU_IFUNC
  14434. && eh->plt.noncall_refcount == 0
  14435. && SYMBOL_REFERENCES_LOCAL (info, h))
  14436. elf32_arm_allocate_irelocs (info, sreloc, p->count);
  14437. else if (h->dynindx != -1
  14438. && (!bfd_link_pic (info) || !info->symbolic || !h->def_regular))
  14439. elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
  14440. else if (htab->fdpic_p && !bfd_link_pic (info))
  14441. htab->srofixup->size += 4 * p->count;
  14442. else
  14443. elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
  14444. }
  14445. return true;
  14446. }
  14447. void
  14448. bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
  14449. int byteswap_code)
  14450. {
  14451. struct elf32_arm_link_hash_table *globals;
  14452. globals = elf32_arm_hash_table (info);
  14453. if (globals == NULL)
  14454. return;
  14455. globals->byteswap_code = byteswap_code;
  14456. }
/* Set the sizes of the dynamic sections.  This is the backend's
   size_dynamic_sections hook: it reserves space in .interp, .got,
   .got.plt, .plt, the various .rel* sections and (for FDPIC) .rofixup,
   first for local symbols, then (via allocate_dynrelocs_for_symbol)
   for global symbols, and finally allocates memory for the
   linker-created dynamic sections.  Returns false on error.  */

static bool
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bool relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      unsigned int symndx;
      struct fdpic_local *local_fdpic_cnts;

      if (! is_arm_elf (ibfd))
	continue;

      /* First reserve space for the dynamic relocs recorded against
	 each input section's local symbols.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (htab->root.target_os == is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  /* For static FDPIC executables the dynamic relocs
		     become rofixups instead.  */
		  if (htab->fdpic_p && !bfd_link_pic (info))
		    htab->srofixup->size += 4 * p->count;
		  else
		    elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (local_got == NULL)
	continue;

      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      /* Walk the per-local-symbol arrays in lock step.  */
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
	{
	  /* Sanity check: don't run off the recorded entry count.  */
	  if (symndx >= elf32_arm_num_entries (ibfd))
	    return false;

	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;

	  /* FDPIC support.  */
	  if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
	    {
	      /* Lazily allocate the single function descriptor for
		 this symbol.  */
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;
		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic (info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}
	    }

	  if (local_fdpic_cnts->funcdesc_cnt > 0)
	    {
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;
		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic (info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}
	      /* We will add n R_ARM_RELATIVE relocations or n rofixups.  */
	      if (bfd_link_pic (info))
		elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
	      else
		htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
	    }

	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, true,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }

	  /* A positive refcount means this local symbol needs a GOT
	     entry; carve out the slot(s) its TLS flavour requires.  */
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, ibfd,
					    symndx);
	      if (isym == NULL)
		return false;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
		    htab->srofixup->size += 4;

		  if ((bfd_link_pic (info) || htab->fdpic_p)
		      && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      /* GDESC needs a trampoline to jump to.  */
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* At the very end of the .rofixup section is a pointer to the GOT,
     reserve space for it.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    htab->srofixup->size += 4;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      /* Errors here are reported but deliberately not fatal: the link
	 continues so that further diagnostics can be emitted.  */
      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);

  /* tls_trampoline was set to -1 above as a "needed" marker; replace
     it with the trampoline's actual offset in .plt now.  */
  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if ((info->flags & DF_BIND_NOW))
	htab->root.tlsdesc_plt = 0;
      else
	{
	  htab->root.tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->root.tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  relocs = false;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_section_name (s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  ;
	}
      else if (startswith (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = true;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->root.sdynbss
	       && s != htab->root.sdynrelro
	       && s != htab->srofixup)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return false;
    }

  return _bfd_elf_maybe_vxworks_add_dynamic_tags (output_bfd, info,
						  relocs);
}
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.  Also checks the FDPIC stack segment
   size.  Returns false on error.  */

static bool
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);

  /* Nothing to do for relocatable links.  */
  if (bfd_link_relocatable (info))
    return true;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      /* Only define _TLS_MODULE_BASE_ if something referenced it
	 (create = true puts a fresh entry in the table if absent).  */
      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", true, true, false);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  /* Define the symbol at offset 0 of the TLS segment.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, false,
		 bed->collect, &bh)))
	    return false;

	  /* Mark the looked-up entry STT_TLS, then switch to the
	     entry returned by the definition above (they may differ)
	     and make it a hidden regular definition.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
	}
    }

  /* For FDPIC, validate/record the __stacksize symbol against the
     default stack segment size.  */
  if (htab->fdpic_p && !bfd_link_relocatable (info)
      && !bfd_elf_stack_segment_size (output_bfd, info,
				      "__stacksize", DEFAULT_STACK_SIZE))
    return false;

  return true;
}
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  Fills in H's PLT entry, emits its copy
   relocation if needed, and fixes up SYM (the symbol about to be
   written to the output symbol table).  Returns false on error.  */

static bool
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  eh = (struct elf32_arm_link_hash_entry *) h;

  if (h->plt.offset != (bfd_vma) -1)
    {
      /* Ordinary (non-.iplt) PLT entries require a dynamic symbol
	 index; .iplt entries are populated elsewhere.  */
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return false;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      /* Read-only copies live in .data.rel.ro and use its reloc
	 section; writable ones go through .rel.bss.  */
      if (h->root.u.def.section == htab->root.sdynrelro)
	s = htab->root.sreldynrelro;
      else
	s = htab->root.srelbss;
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
     it is relative to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->fdpic_p
	  && htab->root.target_os != is_vxworks
	  && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return true;
}
  14916. static void
  14917. arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
  14918. void *contents,
  14919. const unsigned long *template, unsigned count)
  14920. {
  14921. unsigned ix;
  14922. for (ix = 0; ix != count; ix++)
  14923. {
  14924. unsigned long insn = template[ix];
  14925. /* Emit mov pc,rx if bx is not permitted. */
  14926. if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
  14927. insn = (insn & 0xf000000f) | 0x01a0f000;
  14928. put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
  14929. }
  14930. }
  14931. /* Install the special first PLT entry for elf32-arm-nacl. Unlike
  14932. other variants, NaCl needs this entry in a static executable's
  14933. .iplt too. When we're handling that case, GOT_DISPLACEMENT is
  14934. zero. For .iplt really only the last bundle is useful, and .iplt
  14935. could have a shorter first entry, with each individual PLT entry's
  14936. relative branch calculated differently so it targets the last
  14937. bundle instead of the instruction before it (labelled .Lplt_tail
  14938. above). But it's simpler to keep the size and layout of PLT0
  14939. consistent with the dynamic case, at the cost of some dead code at
  14940. the start of .iplt and the one dead store to the stack at the start
  14941. of .Lplt_tail. */
  14942. static void
  14943. arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
  14944. asection *plt, bfd_vma got_displacement)
  14945. {
  14946. unsigned int i;
  14947. put_arm_insn (htab, output_bfd,
  14948. elf32_arm_nacl_plt0_entry[0]
  14949. | arm_movw_immediate (got_displacement),
  14950. plt->contents + 0);
  14951. put_arm_insn (htab, output_bfd,
  14952. elf32_arm_nacl_plt0_entry[1]
  14953. | arm_movt_immediate (got_displacement),
  14954. plt->contents + 4);
  14955. for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
  14956. put_arm_insn (htab, output_bfd,
  14957. elf32_arm_nacl_plt0_entry[i],
  14958. plt->contents + (i * 4));
  14959. }
/* Finish up the dynamic sections.  Called once per link, after section
   addresses are final: patches address-dependent .dynamic tags, writes
   the PLT header and TLS trampolines, and fills in the reserved words
   at the start of the GOT.  */

static bool
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return false;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (sgot != NULL);

      /* Walk every entry in .dynamic and patch the tags whose values
	 can only be known now that output addresses are assigned.  */
      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	    default:
	      /* VxWorks has extra dynamic tags of its own.  */
	      if (htab->root.target_os == is_vxworks
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	      /* These tags were already finalized by generic ELF code.  */
	    case DT_HASH:
	    case DT_STRTAB:
	    case DT_SYMTAB:
	    case DT_VERSYM:
	    case DT_VERDEF:
	    case DT_VERNEED:
	      break;

	      /* Both of these tags want the runtime address of a
		 linker-created section.  */
	    case DT_PLTGOT:
	      name = ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  _bfd_error_handler
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return false;
		}
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	    case DT_REL:
	    case DT_RELA:
	      break;

	      /* Address of the lazy TLS-descriptor trampoline in .plt.  */
	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->root.tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	      /* Address of the GOT slot used by that trampoline.  */
	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->root.tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	      /* Set the bottom bit of DT_INIT/FINI if the
		 corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     false, false, true);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
			 == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->root.target_os == is_vxworks)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->root.target_os == is_nacl)
	    /* NaCl's PLT0 is installed by a dedicated helper; the GOT
	       displacement is relative to the second instruction pair.  */
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->root.tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->root.tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  /* The two data words after the 6-insn trampoline hold the
	     PC-relative offsets of the TLSDESC GOT slot and of the GOT
	     base; the template's words [6] and [7] are the respective
	     PC-read biases to subtract.  */
	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->root.tlsdesc_got
		      - (plt_address + htab->root.tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->root.tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->root.tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->root.tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  /* Pad the trampoline out to the four-word PLT entry size.  */
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->root.target_os == is_vxworks
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry owns a pair of unloaded relocations: one
	     against _GLOBAL_OFFSET_TABLE_, one against _PROCEDURE_LINKAGE_TABLE_.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->root.target_os == is_nacl
      && htab->root.iplt != NULL
      && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* GOT[0] holds the address of .dynamic (zero when there is
	     no dynamic section); GOT[1] and GOT[2] are reserved for
	     the dynamic linker.  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  /* At the very end of the .rofixup section is a pointer to the GOT.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    {
      struct elf_link_hash_entry *hgot = htab->root.hgot;

      bfd_vma got_value = hgot->root.u.def.value
	+ hgot->root.u.def.section->output_section->vma
	+ hgot->root.u.def.section->output_offset;

      arm_elf_add_rofixup (output_bfd, htab->srofixup, got_value);

      /* Make sure we allocated and generated the same number of fixups.  */
      BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
    }

  return true;
}
  15232. static bool
  15233. elf32_arm_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
  15234. {
  15235. Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
  15236. struct elf32_arm_link_hash_table *globals;
  15237. struct elf_segment_map *m;
  15238. if (!_bfd_elf_init_file_header (abfd, link_info))
  15239. return false;
  15240. i_ehdrp = elf_elfheader (abfd);
  15241. if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
  15242. i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
  15243. i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
  15244. if (link_info)
  15245. {
  15246. globals = elf32_arm_hash_table (link_info);
  15247. if (globals != NULL && globals->byteswap_code)
  15248. i_ehdrp->e_flags |= EF_ARM_BE8;
  15249. if (globals->fdpic_p)
  15250. i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
  15251. }
  15252. if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
  15253. && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
  15254. {
  15255. int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
  15256. if (abi == AEABI_VFP_args_vfp)
  15257. i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
  15258. else
  15259. i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
  15260. }
  15261. /* Scan segment to set p_flags attribute if it contains only sections with
  15262. SHF_ARM_PURECODE flag. */
  15263. for (m = elf_seg_map (abfd); m != NULL; m = m->next)
  15264. {
  15265. unsigned int j;
  15266. if (m->count == 0)
  15267. continue;
  15268. for (j = 0; j < m->count; j++)
  15269. {
  15270. if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
  15271. break;
  15272. }
  15273. if (j == m->count)
  15274. {
  15275. m->p_flags = PF_X;
  15276. m->p_flags_valid = 1;
  15277. }
  15278. }
  15279. return true;
  15280. }
  15281. static enum elf_reloc_type_class
  15282. elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
  15283. const asection *rel_sec ATTRIBUTE_UNUSED,
  15284. const Elf_Internal_Rela *rela)
  15285. {
  15286. switch ((int) ELF32_R_TYPE (rela->r_info))
  15287. {
  15288. case R_ARM_RELATIVE:
  15289. return reloc_class_relative;
  15290. case R_ARM_JUMP_SLOT:
  15291. return reloc_class_plt;
  15292. case R_ARM_COPY:
  15293. return reloc_class_copy;
  15294. case R_ARM_IRELATIVE:
  15295. return reloc_class_ifunc;
  15296. default:
  15297. return reloc_class_normal;
  15298. }
  15299. }
  15300. static void
  15301. arm_final_write_processing (bfd *abfd)
  15302. {
  15303. bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
  15304. }
  15305. static bool
  15306. elf32_arm_final_write_processing (bfd *abfd)
  15307. {
  15308. arm_final_write_processing (abfd);
  15309. return _bfd_elf_final_write_processing (abfd);
  15310. }
  15311. /* Return TRUE if this is an unwinding table entry. */
  15312. static bool
  15313. is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
  15314. {
  15315. return (startswith (name, ELF_STRING_ARM_unwind)
  15316. || startswith (name, ELF_STRING_ARM_unwind_once));
  15317. }
  15318. /* Set the type and flags for an ARM section. We do this by
  15319. the section name, which is a hack, but ought to work. */
  15320. static bool
  15321. elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
  15322. {
  15323. const char * name;
  15324. name = bfd_section_name (sec);
  15325. if (is_arm_elf_unwind_section_name (abfd, name))
  15326. {
  15327. hdr->sh_type = SHT_ARM_EXIDX;
  15328. hdr->sh_flags |= SHF_LINK_ORDER;
  15329. }
  15330. if (sec->flags & SEC_ELF_PURECODE)
  15331. hdr->sh_flags |= SHF_ARM_PURECODE;
  15332. return true;
  15333. }
  15334. /* Handle an ARM specific section when reading an object file. This is
  15335. called when bfd_section_from_shdr finds a section with an unknown
  15336. type. */
  15337. static bool
  15338. elf32_arm_section_from_shdr (bfd *abfd,
  15339. Elf_Internal_Shdr * hdr,
  15340. const char *name,
  15341. int shindex)
  15342. {
  15343. /* There ought to be a place to keep ELF backend specific flags, but
  15344. at the moment there isn't one. We just keep track of the
  15345. sections by their name, instead. Fortunately, the ABI gives
  15346. names for all the ARM specific sections, so we will probably get
  15347. away with this. */
  15348. switch (hdr->sh_type)
  15349. {
  15350. case SHT_ARM_EXIDX:
  15351. case SHT_ARM_PREEMPTMAP:
  15352. case SHT_ARM_ATTRIBUTES:
  15353. break;
  15354. default:
  15355. return false;
  15356. }
  15357. if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
  15358. return false;
  15359. return true;
  15360. }
  15361. static _arm_elf_section_data *
  15362. get_arm_elf_section_data (asection * sec)
  15363. {
  15364. if (sec && sec->owner && is_arm_elf (sec->owner))
  15365. return elf32_arm_section_data (sec);
  15366. else
  15367. return NULL;
  15368. }
/* Context threaded through the routines that emit linker-generated
   (mapping and stub) symbols into the output symbol table.  */
typedef struct
{
  /* Opaque state forwarded unchanged to FUNC.  */
  void *flaginfo;
  /* The link being performed.  */
  struct bfd_link_info *info;
  /* Section the symbols currently being emitted belong to.  */
  asection *sec;
  /* Output section index corresponding to SEC.  */
  int sec_shndx;
  /* Callback that actually writes one symbol; returns 1 on success.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
/* The three kinds of ARM EABI mapping symbol; used to index the
   "$a"/"$t"/"$d" name table in elf32_arm_output_map_sym.  */
enum map_symbol_type
{
  ARM_MAP_ARM,		/* $a: start of a run of Arm instructions.  */
  ARM_MAP_THUMB,	/* $t: start of a run of Thumb instructions.  */
  ARM_MAP_DATA		/* $d: start of a run of data.  */
};
  15384. /* Output a single mapping symbol. */
  15385. static bool
  15386. elf32_arm_output_map_sym (output_arch_syminfo *osi,
  15387. enum map_symbol_type type,
  15388. bfd_vma offset)
  15389. {
  15390. static const char *names[3] = {"$a", "$t", "$d"};
  15391. Elf_Internal_Sym sym;
  15392. sym.st_value = osi->sec->output_section->vma
  15393. + osi->sec->output_offset
  15394. + offset;
  15395. sym.st_size = 0;
  15396. sym.st_other = 0;
  15397. sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
  15398. sym.st_shndx = osi->sec_shndx;
  15399. sym.st_target_internal = 0;
  15400. elf32_arm_section_map_add (osi->sec, names[type][1], offset);
  15401. return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
  15402. }
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.  */

static bool
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bool is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* An offset of -1 means no PLT entry was allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return true;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return false;

  /* Select the section the entry lives in; .iplt has no header.  */
  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* The low bit of the recorded offset flags a Thumb entry point;
     mask it off to get the real entry address.  */
  addr = root_plt->offset & -2;
  if (htab->root.target_os == is_vxworks)
    {
      /* VxWorks PLT entries mix code and data words at fixed offsets.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return false;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return false;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return false;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return false;
    }
  else if (htab->root.target_os == is_nacl)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return false;
    }
  else if (htab->fdpic_p)
    {
      enum map_symbol_type type = using_thumb_only (htab)
	? ARM_MAP_THUMB
	: ARM_MAP_ARM;

      /* An optional Thumb thunk precedes the entry by one word.  */
      if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
	if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	  return false;
      if (!elf32_arm_output_map_sym (osi, type, addr))
	return false;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
	return false;
      /* The long (non-lazy) FDPIC entry has a second code run after
	 the data words.  */
      if (htab->plt_entry_size == 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry))
	if (!elf32_arm_output_map_sym (osi, type, addr + 24))
	  return false;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return false;
    }
  else
    {
      bool thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return false;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return false;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return false;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return false;
	}
#endif
    }

  return true;
}
  15495. /* Output mapping symbols for PLT entries associated with H. */
  15496. static bool
  15497. elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
  15498. {
  15499. output_arch_syminfo *osi = (output_arch_syminfo *) inf;
  15500. struct elf32_arm_link_hash_entry *eh;
  15501. if (h->root.type == bfd_link_hash_indirect)
  15502. return true;
  15503. if (h->root.type == bfd_link_hash_warning)
  15504. /* When warning symbols are created, they **replace** the "real"
  15505. entry in the hash table, thus we never get to see the real
  15506. symbol in a hash traversal. So look at it now. */
  15507. h = (struct elf_link_hash_entry *) h->root.u.i.link;
  15508. eh = (struct elf32_arm_link_hash_entry *) h;
  15509. return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
  15510. &h->plt, &eh->plt);
  15511. }
  15512. /* Bind a veneered symbol to its veneer identified by its hash entry
  15513. STUB_ENTRY. The veneered location thus loose its symbol. */
  15514. static void
  15515. arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
  15516. {
  15517. struct elf32_arm_link_hash_entry *hash = stub_entry->h;
  15518. BFD_ASSERT (hash);
  15519. hash->root.root.u.def.section = stub_entry->stub_sec;
  15520. hash->root.root.u.def.value = stub_entry->stub_offset;
  15521. hash->root.size = stub_entry->stub_size;
  15522. }
  15523. /* Output a single local symbol for a generated stub. */
  15524. static bool
  15525. elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
  15526. bfd_vma offset, bfd_vma size)
  15527. {
  15528. Elf_Internal_Sym sym;
  15529. sym.st_value = osi->sec->output_section->vma
  15530. + osi->sec->output_offset
  15531. + offset;
  15532. sym.st_size = size;
  15533. sym.st_other = 0;
  15534. sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  15535. sym.st_shndx = osi->sec_shndx;
  15536. sym.st_target_internal = 0;
  15537. return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
  15538. }
/* Hash-table traversal callback: for one stub (veneer), emit its name
   symbol (unless the stub claims the veneered symbol itself) and the
   mapping symbols describing the code/data runs of its template.  */

static bool
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return true;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    arm_stub_claim_sym (stub_entry);
  else
    {
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return false;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  /* Thumb entry points have the low bit set in the symbol
	     value.  */
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return false;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the template, emitting a new mapping symbol wherever the
     run of instructions/data changes kind.  Starting PREV_TYPE at
     DATA_TYPE guarantees a symbol is emitted for a leading code run.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;
	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;
	default:
	  BFD_FAIL ();
	  return false;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return false;
	}

      /* Advance by the encoded size of this template element.  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;
	case THUMB16_TYPE:
	  size += 2;
	  break;
	case DATA_TYPE:
	  size += 4;
	  break;
	default:
	  BFD_FAIL ();
	  return false;
	}
    }

  return true;
}
  15631. /* Output mapping symbols for linker generated sections,
  15632. and for those data-only sections that do not have a
  15633. $d. */
  15634. static bool
  15635. elf32_arm_output_arch_local_syms (bfd *output_bfd,
  15636. struct bfd_link_info *info,
  15637. void *flaginfo,
  15638. int (*func) (void *, const char *,
  15639. Elf_Internal_Sym *,
  15640. asection *,
  15641. struct elf_link_hash_entry *))
  15642. {
  15643. output_arch_syminfo osi;
  15644. struct elf32_arm_link_hash_table *htab;
  15645. bfd_vma offset;
  15646. bfd_size_type size;
  15647. bfd *input_bfd;
  15648. htab = elf32_arm_hash_table (info);
  15649. if (htab == NULL)
  15650. return false;
  15651. check_use_blx (htab);
  15652. osi.flaginfo = flaginfo;
  15653. osi.info = info;
  15654. osi.func = func;
  15655. /* Add a $d mapping symbol to data-only sections that
  15656. don't have any mapping symbol. This may result in (harmless) redundant
  15657. mapping symbols. */
  15658. for (input_bfd = info->input_bfds;
  15659. input_bfd != NULL;
  15660. input_bfd = input_bfd->link.next)
  15661. {
  15662. if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
  15663. for (osi.sec = input_bfd->sections;
  15664. osi.sec != NULL;
  15665. osi.sec = osi.sec->next)
  15666. {
  15667. if (osi.sec->output_section != NULL
  15668. && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
  15669. != 0)
  15670. && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
  15671. == SEC_HAS_CONTENTS
  15672. && get_arm_elf_section_data (osi.sec) != NULL
  15673. && get_arm_elf_section_data (osi.sec)->mapcount == 0
  15674. && osi.sec->size > 0
  15675. && (osi.sec->flags & SEC_EXCLUDE) == 0)
  15676. {
  15677. osi.sec_shndx = _bfd_elf_section_from_bfd_section
  15678. (output_bfd, osi.sec->output_section);
  15679. if (osi.sec_shndx != (int)SHN_BAD)
  15680. elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
  15681. }
  15682. }
  15683. }
  15684. /* ARM->Thumb glue. */
  15685. if (htab->arm_glue_size > 0)
  15686. {
  15687. osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
  15688. ARM2THUMB_GLUE_SECTION_NAME);
  15689. osi.sec_shndx = _bfd_elf_section_from_bfd_section
  15690. (output_bfd, osi.sec->output_section);
  15691. if (bfd_link_pic (info) || htab->root.is_relocatable_executable
  15692. || htab->pic_veneer)
  15693. size = ARM2THUMB_PIC_GLUE_SIZE;
  15694. else if (htab->use_blx)
  15695. size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  15696. else
  15697. size = ARM2THUMB_STATIC_GLUE_SIZE;
  15698. for (offset = 0; offset < htab->arm_glue_size; offset += size)
  15699. {
  15700. elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
  15701. elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
  15702. }
  15703. }
  15704. /* Thumb->ARM glue. */
  15705. if (htab->thumb_glue_size > 0)
  15706. {
  15707. osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
  15708. THUMB2ARM_GLUE_SECTION_NAME);
  15709. osi.sec_shndx = _bfd_elf_section_from_bfd_section
  15710. (output_bfd, osi.sec->output_section);
  15711. size = THUMB2ARM_GLUE_SIZE;
  15712. for (offset = 0; offset < htab->thumb_glue_size; offset += size)
  15713. {
  15714. elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
  15715. elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
  15716. }
  15717. }
  15718. /* ARMv4 BX veneers. */
  15719. if (htab->bx_glue_size > 0)
  15720. {
  15721. osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
  15722. ARM_BX_GLUE_SECTION_NAME);
  15723. osi.sec_shndx = _bfd_elf_section_from_bfd_section
  15724. (output_bfd, osi.sec->output_section);
  15725. elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
  15726. }
  15727. /* Long calls stubs. */
  15728. if (htab->stub_bfd && htab->stub_bfd->sections)
  15729. {
  15730. asection* stub_sec;
  15731. for (stub_sec = htab->stub_bfd->sections;
  15732. stub_sec != NULL;
  15733. stub_sec = stub_sec->next)
  15734. {
  15735. /* Ignore non-stub sections. */
  15736. if (!strstr (stub_sec->name, STUB_SUFFIX))
  15737. continue;
  15738. osi.sec = stub_sec;
  15739. osi.sec_shndx = _bfd_elf_section_from_bfd_section
  15740. (output_bfd, osi.sec->output_section);
  15741. bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
  15742. }
  15743. }
  15744. /* Finally, output mapping symbols for the PLT. */
  15745. if (htab->root.splt && htab->root.splt->size > 0)
  15746. {
  15747. osi.sec = htab->root.splt;
  15748. osi.sec_shndx = (_bfd_elf_section_from_bfd_section
  15749. (output_bfd, osi.sec->output_section));
  15750. /* Output mapping symbols for the plt header. */
  15751. if (htab->root.target_os == is_vxworks)
  15752. {
  15753. /* VxWorks shared libraries have no PLT header. */
  15754. if (!bfd_link_pic (info))
  15755. {
  15756. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
  15757. return false;
  15758. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
  15759. return false;
  15760. }
  15761. }
  15762. else if (htab->root.target_os == is_nacl)
  15763. {
  15764. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
  15765. return false;
  15766. }
  15767. else if (using_thumb_only (htab) && !htab->fdpic_p)
  15768. {
  15769. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
  15770. return false;
  15771. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
  15772. return false;
  15773. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
  15774. return false;
  15775. }
  15776. else if (!htab->fdpic_p)
  15777. {
  15778. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
  15779. return false;
  15780. #ifndef FOUR_WORD_PLT
  15781. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
  15782. return false;
  15783. #endif
  15784. }
  15785. }
  15786. if (htab->root.target_os == is_nacl
  15787. && htab->root.iplt
  15788. && htab->root.iplt->size > 0)
  15789. {
  15790. /* NaCl uses a special first entry in .iplt too. */
  15791. osi.sec = htab->root.iplt;
  15792. osi.sec_shndx = (_bfd_elf_section_from_bfd_section
  15793. (output_bfd, osi.sec->output_section));
  15794. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
  15795. return false;
  15796. }
  15797. if ((htab->root.splt && htab->root.splt->size > 0)
  15798. || (htab->root.iplt && htab->root.iplt->size > 0))
  15799. {
  15800. elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
  15801. for (input_bfd = info->input_bfds;
  15802. input_bfd != NULL;
  15803. input_bfd = input_bfd->link.next)
  15804. {
  15805. struct arm_local_iplt_info **local_iplt;
  15806. unsigned int i, num_syms;
  15807. local_iplt = elf32_arm_local_iplt (input_bfd);
  15808. if (local_iplt != NULL)
  15809. {
  15810. num_syms = elf_symtab_hdr (input_bfd).sh_info;
  15811. if (num_syms > elf32_arm_num_entries (input_bfd))
  15812. {
  15813. _bfd_error_handler (_("\
  15814. %pB: Number of symbols in input file has increased from %lu to %u\n"),
  15815. input_bfd,
  15816. (unsigned long) elf32_arm_num_entries (input_bfd),
  15817. num_syms);
  15818. return false;
  15819. }
  15820. for (i = 0; i < num_syms; i++)
  15821. if (local_iplt[i] != NULL
  15822. && !elf32_arm_output_plt_map_1 (&osi, true,
  15823. &local_iplt[i]->root,
  15824. &local_iplt[i]->arm))
  15825. return false;
  15826. }
  15827. }
  15828. }
  15829. if (htab->root.tlsdesc_plt != 0)
  15830. {
  15831. /* Mapping symbols for the lazy tls trampoline. */
  15832. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM,
  15833. htab->root.tlsdesc_plt))
  15834. return false;
  15835. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
  15836. htab->root.tlsdesc_plt + 24))
  15837. return false;
  15838. }
  15839. if (htab->tls_trampoline != 0)
  15840. {
  15841. /* Mapping symbols for the tls trampoline. */
  15842. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
  15843. return false;
  15844. #ifdef FOUR_WORD_PLT
  15845. if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
  15846. htab->tls_trampoline + 12))
  15847. return false;
  15848. #endif
  15849. }
  15850. return true;
  15851. }
  15852. /* Filter normal symbols of CMSE entry functions of ABFD to include in
  15853. the import library. All SYMCOUNT symbols of ABFD can be examined
  15854. from their pointers in SYMS. Pointers of symbols to keep should be
  15855. stored continuously at the beginning of that array.
  15856. Returns the number of symbols to keep. */
  15857. static unsigned int
  15858. elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
  15859. struct bfd_link_info *info,
  15860. asymbol **syms, long symcount)
  15861. {
  15862. size_t maxnamelen;
  15863. char *cmse_name;
  15864. long src_count, dst_count = 0;
  15865. struct elf32_arm_link_hash_table *htab;
  15866. htab = elf32_arm_hash_table (info);
  15867. if (!htab->stub_bfd || !htab->stub_bfd->sections)
  15868. symcount = 0;
  15869. maxnamelen = 128;
  15870. cmse_name = (char *) bfd_malloc (maxnamelen);
  15871. BFD_ASSERT (cmse_name);
  15872. for (src_count = 0; src_count < symcount; src_count++)
  15873. {
  15874. struct elf32_arm_link_hash_entry *cmse_hash;
  15875. asymbol *sym;
  15876. flagword flags;
  15877. char *name;
  15878. size_t namelen;
  15879. sym = syms[src_count];
  15880. flags = sym->flags;
  15881. name = (char *) bfd_asymbol_name (sym);
  15882. if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
  15883. continue;
  15884. if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
  15885. continue;
  15886. namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
  15887. if (namelen > maxnamelen)
  15888. {
  15889. cmse_name = (char *)
  15890. bfd_realloc (cmse_name, namelen);
  15891. maxnamelen = namelen;
  15892. }
  15893. snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
  15894. cmse_hash = (struct elf32_arm_link_hash_entry *)
  15895. elf_link_hash_lookup (&(htab)->root, cmse_name, false, false, true);
  15896. if (!cmse_hash
  15897. || (cmse_hash->root.root.type != bfd_link_hash_defined
  15898. && cmse_hash->root.root.type != bfd_link_hash_defweak)
  15899. || cmse_hash->root.type != STT_FUNC)
  15900. continue;
  15901. syms[dst_count++] = sym;
  15902. }
  15903. free (cmse_name);
  15904. syms[dst_count] = NULL;
  15905. return dst_count;
  15906. }
  15907. /* Filter symbols of ABFD to include in the import library. All
  15908. SYMCOUNT symbols of ABFD can be examined from their pointers in
  15909. SYMS. Pointers of symbols to keep should be stored continuously at
  15910. the beginning of that array.
  15911. Returns the number of symbols to keep. */
  15912. static unsigned int
  15913. elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
  15914. struct bfd_link_info *info,
  15915. asymbol **syms, long symcount)
  15916. {
  15917. struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
  15918. /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
  15919. Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
  15920. library to be a relocatable object file. */
  15921. BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
  15922. if (globals->cmse_implib)
  15923. return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
  15924. else
  15925. return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
  15926. }
  15927. /* Allocate target specific section data. */
  15928. static bool
  15929. elf32_arm_new_section_hook (bfd *abfd, asection *sec)
  15930. {
  15931. if (!sec->used_by_bfd)
  15932. {
  15933. _arm_elf_section_data *sdata;
  15934. size_t amt = sizeof (*sdata);
  15935. sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
  15936. if (sdata == NULL)
  15937. return false;
  15938. sec->used_by_bfd = sdata;
  15939. }
  15940. return _bfd_elf_new_section_hook (abfd, sec);
  15941. }
  15942. /* Used to order a list of mapping symbols by address. */
  15943. static int
  15944. elf32_arm_compare_mapping (const void * a, const void * b)
  15945. {
  15946. const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
  15947. const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
  15948. if (amap->vma > bmap->vma)
  15949. return 1;
  15950. else if (amap->vma < bmap->vma)
  15951. return -1;
  15952. else if (amap->type > bmap->type)
  15953. /* Ensure results do not depend on the host qsort for objects with
  15954. multiple mapping symbols at the same address by sorting on type
  15955. after vma. */
  15956. return 1;
  15957. else if (amap->type < bmap->type)
  15958. return -1;
  15959. else
  15960. return 0;
  15961. }
  15962. /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
  15963. static unsigned long
  15964. offset_prel31 (unsigned long addr, bfd_vma offset)
  15965. {
  15966. return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
  15967. }
  15968. /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
  15969. relocations. */
  15970. static void
  15971. copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
  15972. {
  15973. unsigned long first_word = bfd_get_32 (output_bfd, from);
  15974. unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
  15975. /* High bit of first word is supposed to be zero. */
  15976. if ((first_word & 0x80000000ul) == 0)
  15977. first_word = offset_prel31 (first_word, offset);
  15978. /* If the high bit of the first word is clear, and the bit pattern is not 0x1
  15979. (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
  15980. if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
  15981. second_word = offset_prel31 (second_word, offset);
  15982. bfd_put_32 (output_bfd, first_word, to);
  15983. bfd_put_32 (output_bfd, second_word, to + 4);
  15984. }
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  /* Section whose contents are currently being patched.  */
  asection *writing_section;
  /* In-memory contents of that section.  */
  bfd_byte *contents;
};
/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.  Called via bfd_hash_traverse;
   GEN_ENTRY is a stub hash entry and IN_ARG an
   a8_branch_to_stub_data.  Returns false on error.  */

static bool
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Skip stubs for other sections, and stub types that are not
     Cortex-A8 veneers (presumably the a8 veneer types are grouped at
     or after arm_stub_a8_veneer_lwm in the stub-type enum — confirm
     against its definition).  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return true;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* For a BLX veneer the branch target is word-aligned.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
			    "allocated in unsafe location"), abfd);
      return false;
    }

  /* Pick the 32-bit Thumb branch opcode, then fill in the common
     24-bit offset field at jump24.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;	/* B.  */
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;	/* BLX.  */
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;	/* BL.  */

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
				  "of range (input file too large)"), abfd);
	    return false;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */
	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return false;
    }

  /* Write the two halfwords of the replacement branch over the
     veneered instruction.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return true;
}
  16078. /* Beginning of stm32l4xx work-around. */
  16079. /* Functions encoding instructions necessary for the emission of the
  16080. fix-stm32l4xx-629360.
  16081. Encoding is extracted from the
  16082. ARM (C) Architecture Reference Manual
  16083. ARMv7-A and ARMv7-R edition
  16084. ARM DDI 0406C.b (ID072512). */
  16085. static inline bfd_vma
  16086. create_instruction_branch_absolute (int branch_offset)
  16087. {
  16088. /* A8.8.18 B (A8-334)
  16089. B target_address (Encoding T4). */
  16090. /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
  16091. /* jump offset is: S:I1:I2:imm10:imm11:0. */
  16092. /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
  16093. int s = ((branch_offset & 0x1000000) >> 24);
  16094. int j1 = s ^ !((branch_offset & 0x800000) >> 23);
  16095. int j2 = s ^ !((branch_offset & 0x400000) >> 22);
  16096. if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
  16097. BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
  16098. bfd_vma patched_inst = 0xf0009000
  16099. | s << 26 /* S. */
  16100. | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
  16101. | j1 << 13 /* J1. */
  16102. | j2 << 11 /* J2. */
  16103. | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
  16104. return patched_inst;
  16105. }
  16106. static inline bfd_vma
  16107. create_instruction_ldmia (int base_reg, int wback, int reg_mask)
  16108. {
  16109. /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
  16110. LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
  16111. bfd_vma patched_inst = 0xe8900000
  16112. | (/*W=*/wback << 21)
  16113. | (base_reg << 16)
  16114. | (reg_mask & 0x0000ffff);
  16115. return patched_inst;
  16116. }
  16117. static inline bfd_vma
  16118. create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
  16119. {
  16120. /* A8.8.60 LDMDB/LDMEA (A8-402)
  16121. LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
  16122. bfd_vma patched_inst = 0xe9100000
  16123. | (/*W=*/wback << 21)
  16124. | (base_reg << 16)
  16125. | (reg_mask & 0x0000ffff);
  16126. return patched_inst;
  16127. }
  16128. static inline bfd_vma
  16129. create_instruction_mov (int target_reg, int source_reg)
  16130. {
  16131. /* A8.8.103 MOV (register) (A8-486)
  16132. MOV Rd, Rm (Encoding T1). */
  16133. bfd_vma patched_inst = 0x4600
  16134. | (target_reg & 0x7)
  16135. | ((target_reg & 0x8) >> 3) << 7
  16136. | (source_reg << 3);
  16137. return patched_inst;
  16138. }
  16139. static inline bfd_vma
  16140. create_instruction_sub (int target_reg, int source_reg, int value)
  16141. {
  16142. /* A8.8.221 SUB (immediate) (A8-708)
  16143. SUB Rd, Rn, #value (Encoding T3). */
  16144. bfd_vma patched_inst = 0xf1a00000
  16145. | (target_reg << 8)
  16146. | (source_reg << 16)
  16147. | (/*S=*/0 << 20)
  16148. | ((value & 0x800) >> 11) << 26
  16149. | ((value & 0x700) >> 8) << 12
  16150. | (value & 0x0ff);
  16151. return patched_inst;
  16152. }
  16153. static inline bfd_vma
  16154. create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
  16155. int first_reg)
  16156. {
  16157. /* A8.8.332 VLDM (A8-922)
  16158. VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
  16159. bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
  16160. | (/*W=*/wback << 21)
  16161. | (base_reg << 16)
  16162. | (num_words & 0x000000ff)
  16163. | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
  16164. | (first_reg & 0x00000001) << 22;
  16165. return patched_inst;
  16166. }
  16167. static inline bfd_vma
  16168. create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
  16169. int first_reg)
  16170. {
  16171. /* A8.8.332 VLDM (A8-922)
  16172. VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
  16173. bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
  16174. | (base_reg << 16)
  16175. | (num_words & 0x000000ff)
  16176. | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
  16177. | (first_reg & 0x00000001) << 22;
  16178. return patched_inst;
  16179. }
  16180. static inline bfd_vma
  16181. create_instruction_udf_w (int value)
  16182. {
  16183. /* A8.8.247 UDF (A8-758)
  16184. Undefined (Encoding T2). */
  16185. bfd_vma patched_inst = 0xf7f0a000
  16186. | (value & 0x00000fff)
  16187. | (value & 0x000f0000) << 16;
  16188. return patched_inst;
  16189. }
  16190. static inline bfd_vma
  16191. create_instruction_udf (int value)
  16192. {
  16193. /* A8.8.247 UDF (A8-758)
  16194. Undefined (Encoding T1). */
  16195. bfd_vma patched_inst = 0xde00
  16196. | (value & 0xff);
  16197. return patched_inst;
  16198. }
  16199. /* Functions writing an instruction in memory, returning the next
  16200. memory position to write to. */
  16201. static inline bfd_byte *
  16202. push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
  16203. bfd * output_bfd, bfd_byte *pt, insn32 insn)
  16204. {
  16205. put_thumb2_insn (htab, output_bfd, insn, pt);
  16206. return pt + 4;
  16207. }
  16208. static inline bfd_byte *
  16209. push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
  16210. bfd * output_bfd, bfd_byte *pt, insn32 insn)
  16211. {
  16212. put_thumb_insn (htab, output_bfd, insn, pt);
  16213. return pt + 2;
  16214. }
  16215. /* Function filling up a region in memory with T1 and T2 UDFs taking
  16216. care of alignment. */
  16217. static bfd_byte *
  16218. stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
  16219. bfd * output_bfd,
  16220. const bfd_byte * const base_stub_contents,
  16221. bfd_byte * const from_stub_contents,
  16222. const bfd_byte * const end_stub_contents)
  16223. {
  16224. bfd_byte *current_stub_contents = from_stub_contents;
  16225. /* Fill the remaining of the stub with deterministic contents : UDF
  16226. instructions.
  16227. Check if realignment is needed on modulo 4 frontier using T1, to
  16228. further use T2. */
  16229. if ((current_stub_contents < end_stub_contents)
  16230. && !((current_stub_contents - base_stub_contents) % 2)
  16231. && ((current_stub_contents - base_stub_contents) % 4))
  16232. current_stub_contents =
  16233. push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
  16234. create_instruction_udf (0));
  16235. for (; current_stub_contents < end_stub_contents;)
  16236. current_stub_contents =
  16237. push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
  16238. create_instruction_udf_w (0));
  16239. return current_stub_contents;
  16240. }
/* Functions writing the stream of instructions equivalent to the
   derived sequence for ldmia, ldmdb, vldm respectively.  */

/* Emit into the veneer at BASE_STUB_CONTENTS a sequence equivalent to
   the erratum-affected Thumb-2 LDMIA INITIAL_INSN (located at
   INITIAL_INSN_ADDR), splitting wide register lists into two smaller
   LDMs and branching back to the instruction after the original.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (writeback) bit.  */
  int ri, rn = (initial_insn & 0x000F0000) >> 16;	/* Base register Rn.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  Not needed if the LDM loaded PC (the
	 load itself transfers control).  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the instruction against the architectural
     constraints on LDM register lists.  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* With writeback the base advances through both halves, so the
	 two LDMs can use Rn directly.  */
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  The final
	 load without writeback also restores Ri itself.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
/* Emit into the veneer at BASE_STUB_CONTENTS a sequence equivalent to
   the erratum-affected Thumb-2 LDMDB INITIAL_INSN (located at
   INITIAL_INSN_ADDR).  Cases are split on writeback / PC-in-list /
   Rn-in-list; wide register lists are divided into two smaller LDMs.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (writeback) bit.  */
  int ri, rn = (initial_insn & 0x000f0000) >> 16;	/* Base register Rn.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the instruction against the architectural
     constraints on LDM register lists.  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  One arm per combination of
     wback/restore_pc/restore_rn.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  Point Ri at the lowest loaded
	 word so ascending LDMIAs can replace the descending LDMDB.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  No branch back needed: the
	 load of PC transfers control.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers).  Performs the writeback up
	 front, then works from a copy in Ri.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  Loading PC transfers
	 control; no branch back needed.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  The final load restores Rn
	 itself from memory.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  Loading PC transfers
	 control; no branch back needed.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
/* Build the replacement veneer for an erratum-triggering Thumb-2 VLDM
   at INITIAL_INSN_ADDR, writing the stub into BASE_STUB_CONTENTS.
   Loads of eight or fewer words do not trip the hardware bug and are
   copied unchanged; longer register lists are split into chunks of at
   most eight words each.  Unused stub space is filled with UDF.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* Low byte of a VLDM encodes the register-list length in words.  */
  int num_words = initial_insn & 0xff;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bool is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bool is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bool is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bool is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      /* Bits <19:16> hold the base register Rn.  */
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);
      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		   && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm	rx, {...}
	 ->	vldm	rx!, {8_words_or_less} for each needed 8_word
	 ->	sub	rx, rx, #size (list)

	 vldm	rx!, {...}
	 ->	vldm	rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd	rx!, {...}
	 ->	vldmb	rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* Last chunk gets the remainder of the word count.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
  16682. static void
  16683. stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
  16684. bfd * output_bfd,
  16685. const insn32 wrong_insn,
  16686. const bfd_byte *const wrong_insn_addr,
  16687. bfd_byte *const stub_contents)
  16688. {
  16689. if (is_thumb2_ldmia (wrong_insn))
  16690. stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
  16691. wrong_insn, wrong_insn_addr,
  16692. stub_contents);
  16693. else if (is_thumb2_ldmdb (wrong_insn))
  16694. stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
  16695. wrong_insn, wrong_insn_addr,
  16696. stub_contents);
  16697. else if (is_thumb2_vldm (wrong_insn))
  16698. stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
  16699. wrong_insn, wrong_insn_addr,
  16700. stub_contents);
  16701. }
  16702. /* End of stm32l4xx work-around. */
/* Do code byteswapping, apply VFP11/STM32L4XX erratum fixups, rewrite
   edited .ARM.exidx sections and patch Cortex-A8 branches, all at
   final-write time.  Return FALSE afterwards so that the section is
   written out as normal (TRUE is returned only for SHT_ARM_EXIDX
   sections, whose edited contents are written here directly).  */

static bool
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return false;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return false;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  /* Apply any recorded VFP11 erratum fixups for this section.  */
  if (errcount != 0)
    {
      /* XOR-ing a byte index with 3 flips it between the little- and
	 big-endian positions within a 32-bit word.  */
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		/* ARM B has a signed 26-bit (word-aligned) range.  */
		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* Apply any recorded STM32L4XX erratum fixups for this section.  */
  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		/* Thumb-2 B.W has a signed 25-bit range.  */
		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    _bfd_error_handler
		      (_("%pB(%#" PRIx64 "): error: "
			 "cannot create STM32L4XX veneer; "
			 "jump out of range by %" PRId64 " bytes; "
			 "cannot encode branch instruction"),
		       output_bfd,
		       (uint64_t) (stm32l4xx_errnode->vma - 4),
		       (int64_t) out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
					  "veneer"), output_bfd);
		    continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* EXIDX sections that were edited (duplicate merging, CANTUNWIND
     insertion) are rebuilt into a fresh buffer and written here.  */
  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      if (edited_contents == NULL)
	return false;
      /* Walk input entries and the edit list in lockstep; each EXIDX
	 entry is 8 bytes.  */
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;
			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      /* NOTE(review): edited_contents does not appear to be freed on
	 this path — looks like a one-shot leak at final link; confirm
	 intended ownership.  */
      return true;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return false;

  /* Swap code bytes using the mapping symbols: 'a' = ARM code (words),
     't' = Thumb code (halfwords), 'd' = data (untouched).  */
  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  /* The mapping data is no longer needed; mark it consumed.  */
  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return false;
}
  17011. /* Mangle thumb function symbols as we read them in. */
  17012. static bool
  17013. elf32_arm_swap_symbol_in (bfd * abfd,
  17014. const void *psrc,
  17015. const void *pshn,
  17016. Elf_Internal_Sym *dst)
  17017. {
  17018. if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
  17019. return false;
  17020. dst->st_target_internal = 0;
  17021. /* New EABI objects mark thumb function symbols by setting the low bit of
  17022. the address. */
  17023. if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
  17024. || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
  17025. {
  17026. if (dst->st_value & 1)
  17027. {
  17028. dst->st_value &= ~(bfd_vma) 1;
  17029. ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
  17030. ST_BRANCH_TO_THUMB);
  17031. }
  17032. else
  17033. ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
  17034. }
  17035. else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
  17036. {
  17037. dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
  17038. ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
  17039. }
  17040. else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
  17041. ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
  17042. else
  17043. ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
  17044. return true;
  17045. }
  17046. /* Mangle thumb function symbols as we write them out. */
  17047. static void
  17048. elf32_arm_swap_symbol_out (bfd *abfd,
  17049. const Elf_Internal_Sym *src,
  17050. void *cdst,
  17051. void *shndx)
  17052. {
  17053. Elf_Internal_Sym newsym;
  17054. /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
  17055. of the address set, as per the new EABI. We do this unconditionally
  17056. because objcopy does not set the elf header flags until after
  17057. it writes out the symbol table. */
  17058. if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
  17059. {
  17060. newsym = *src;
  17061. if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
  17062. newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
  17063. if (newsym.st_shndx != SHN_UNDEF)
  17064. {
  17065. /* Do this only for defined symbols. At link type, the static
  17066. linker will simulate the work of dynamic linker of resolving
  17067. symbols and will carry over the thumbness of found symbols to
  17068. the output symbol table. It's not clear how it happens, but
  17069. the thumbness of undefined symbols can well be different at
  17070. runtime, and writing '1' for them will be confusing for users
  17071. and possibly for dynamic linker itself.
  17072. */
  17073. newsym.st_value |= 1;
  17074. }
  17075. src = &newsym;
  17076. }
  17077. bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
  17078. }
  17079. /* Add the PT_ARM_EXIDX program header. */
  17080. static bool
  17081. elf32_arm_modify_segment_map (bfd *abfd,
  17082. struct bfd_link_info *info ATTRIBUTE_UNUSED)
  17083. {
  17084. struct elf_segment_map *m;
  17085. asection *sec;
  17086. sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  17087. if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
  17088. {
  17089. /* If there is already a PT_ARM_EXIDX header, then we do not
  17090. want to add another one. This situation arises when running
  17091. "strip"; the input binary already has the header. */
  17092. m = elf_seg_map (abfd);
  17093. while (m && m->p_type != PT_ARM_EXIDX)
  17094. m = m->next;
  17095. if (!m)
  17096. {
  17097. m = (struct elf_segment_map *)
  17098. bfd_zalloc (abfd, sizeof (struct elf_segment_map));
  17099. if (m == NULL)
  17100. return false;
  17101. m->p_type = PT_ARM_EXIDX;
  17102. m->count = 1;
  17103. m->sections[0] = sec;
  17104. m->next = elf_seg_map (abfd);
  17105. elf_seg_map (abfd) = m;
  17106. }
  17107. }
  17108. return true;
  17109. }
  17110. /* We may add a PT_ARM_EXIDX program header. */
  17111. static int
  17112. elf32_arm_additional_program_headers (bfd *abfd,
  17113. struct bfd_link_info *info ATTRIBUTE_UNUSED)
  17114. {
  17115. asection *sec;
  17116. sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  17117. if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
  17118. return 1;
  17119. else
  17120. return 0;
  17121. }
  17122. /* Hook called by the linker routine which adds symbols from an object
  17123. file. */
  17124. static bool
  17125. elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
  17126. Elf_Internal_Sym *sym, const char **namep,
  17127. flagword *flagsp, asection **secp, bfd_vma *valp)
  17128. {
  17129. if (elf32_arm_hash_table (info) == NULL)
  17130. return false;
  17131. if (elf32_arm_hash_table (info)->root.target_os == is_vxworks
  17132. && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
  17133. flagsp, secp, valp))
  17134. return false;
  17135. return true;
  17136. }
/* We use this to override swap_symbol_in and swap_symbol_out.  All
   other fields carry the generic 32-bit ELF values.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* hash table entry size.  */
  1,		/* internal relocs per external reloc.  */
  32, 2,	/* arch_size, log_file_align.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* ARM-specific override.  */
  elf32_arm_swap_symbol_out,	/* ARM-specific override.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
  17167. static bfd_vma
  17168. read_code32 (const bfd *abfd, const bfd_byte *addr)
  17169. {
  17170. /* V7 BE8 code is always little endian. */
  17171. if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
  17172. return bfd_getl32 (addr);
  17173. return bfd_get_32 (abfd, addr);
  17174. }
  17175. static bfd_vma
  17176. read_code16 (const bfd *abfd, const bfd_byte *addr)
  17177. {
  17178. /* V7 BE8 code is always little endian. */
  17179. if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
  17180. return bfd_getl16 (addr);
  17181. return bfd_get_16 (abfd, addr);
  17182. }
  17183. /* Return size of plt0 entry starting at ADDR
  17184. or (bfd_vma) -1 if size can not be determined. */
  17185. static bfd_vma
  17186. elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
  17187. {
  17188. bfd_vma first_word;
  17189. bfd_vma plt0_size;
  17190. first_word = read_code32 (abfd, addr);
  17191. if (first_word == elf32_arm_plt0_entry[0])
  17192. plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
  17193. else if (first_word == elf32_thumb2_plt0_entry[0])
  17194. plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
  17195. else
  17196. /* We don't yet handle this PLT format. */
  17197. return (bfd_vma) -1;
  17198. return plt0_size;
  17199. }
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add (low byte varies per entry).  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  /* The dangling 'else' binds to whichever chain was compiled in.  */
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.
   Synthesize "name@plt" symbols, one per .rel.plt entry, so tools can
   label PLT stubs.  Returns the number of symbols created, 0 if there
   is nothing to do, or -1 on error.  The caller owns *RET.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
				long symcount ATTRIBUTE_UNUSED,
				asymbol **syms ATTRIBUTE_UNUSED,
				long dynsymcount,
				asymbol **dynsyms,
				asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, true))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents (abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      /* Cache the contents on the section so the BFD frees them.  */
      bfd_cache_section_contents ((asection *) plt, data);
    }

  /* First pass: size the single allocation holding the asymbol array
     followed by all the "name@plt[+0xADDEND]" strings.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* Skip over the PLT header to the first entry.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Second pass: fill in the symbols and their names.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Drop leading zeroes from the printed addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
  17332. static bool
  17333. elf32_arm_section_flags (const Elf_Internal_Shdr *hdr)
  17334. {
  17335. if (hdr->sh_flags & SHF_ARM_PURECODE)
  17336. hdr->bfd_section->flags |= SEC_ELF_PURECODE;
  17337. return true;
  17338. }
  17339. static flagword
  17340. elf32_arm_lookup_section_flags (char *flag_name)
  17341. {
  17342. if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
  17343. return SHF_ARM_PURECODE;
  17344. return SEC_NO_FLAGS;
  17345. }
  17346. static unsigned int
  17347. elf32_arm_count_additional_relocs (asection *sec)
  17348. {
  17349. struct _arm_elf_section_data *arm_data;
  17350. arm_data = get_arm_elf_section_data (sec);
  17351. return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
  17352. }
/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bool
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	unsigned i = 0;

	/* Exception index tables are allocated and ordered relative to
	   the text section they describe.  */
	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Search backwards for the output section header whose BFD
	       section is the output section of the input's linked text
	       section.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    /* Walk backwards from OSECTION looking for an allocated,
	       executable PROGBITS section.
	       NOTE(review): these backward scans post-decrement an unsigned
	       index; if no match is found, the final decrement wraps i to
	       UINT_MAX, which would satisfy the "if (i)" test below and set
	       a bogus sh_link -- confirm a match always exists in
	       practice.  */
	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return true;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return false;
}
  17434. /* Returns TRUE if NAME is an ARM mapping symbol.
  17435. Traditionally the symbols $a, $d and $t have been used.
  17436. The ARM ELF standard also defines $x (for A64 code). It also allows a
  17437. period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
  17438. Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
  17439. not support them here. $t.x indicates the start of ThumbEE instructions. */
  17440. static bool
  17441. is_arm_mapping_symbol (const char * name)
  17442. {
  17443. return name != NULL /* Paranoia. */
  17444. && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
  17445. the mapping symbols could have acquired a prefix.
  17446. We do not support this here, since such symbols no
  17447. longer conform to the ARM ELF ABI. */
  17448. && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
  17449. && (name[2] == 0 || name[2] == '.');
  17450. /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
  17451. any characters that follow the period are legal characters for the body
  17452. of a symbol's name. For now we just assume that this is the case. */
  17453. }
  17454. /* Make sure that mapping symbols in object files are not removed via the
  17455. "strip --strip-unneeded" tool. These symbols are needed in order to
  17456. correctly generate interworking veneers, and for byte swapping code
  17457. regions. Once an object file has been linked, it is safe to remove the
  17458. symbols as they will no longer be needed. */
  17459. static void
  17460. elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
  17461. {
  17462. if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
  17463. && sym->section != bfd_abs_section_ptr
  17464. && is_arm_mapping_symbol (sym->name))
  17465. sym->flags |= BSF_KEEP;
  17466. }
/* Target vector configuration for the standard ARM ELF targets
   (elf32-littlearm / elf32-bigarm); elf32-target.h below expands these
   macros into the backend data and target vectors.  */

#undef	elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields

#define ELF_ARCH			bfd_arch_arm
#define ELF_TARGET_ID			ARM_ELF_DATA
#define ELF_MACHINE_CODE		EM_ARM
/* QNX targets use 4K maximum pages; everywhere else allow 64K.  */
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE			0x1000
#else
#define ELF_MAXPAGESIZE			0x10000
#endif
#define ELF_COMMONPAGESIZE		0x1000

/* Generic BFD entry points implemented by this backend.  */
#define bfd_elf32_mkobject			elf32_arm_mkobject
#define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
#define bfd_elf32_find_inliner_info		elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
#define bfd_elf32_bfd_final_link		elf32_arm_final_link
#define bfd_elf32_get_synthetic_symtab		elf32_arm_get_synthetic_symtab

/* ELF backend hooks.  */
#define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
#define elf_backend_maybe_function_sym		elf32_arm_maybe_function_sym
#define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
#define elf_backend_check_relocs		elf32_arm_check_relocs
#define elf_backend_update_relocs		elf32_arm_update_relocs
#define elf_backend_relocate_section		elf32_arm_relocate_section
#define elf_backend_write_section		elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
#define elf_backend_always_size_sections	elf32_arm_always_size_sections
#define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
#define elf_backend_init_file_header		elf32_arm_init_file_header
#define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
#define elf_backend_object_p			elf32_arm_object_p
#define elf_backend_fake_sections		elf32_arm_fake_sections
#define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
#define elf_backend_size_info			elf32_arm_size_info
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
#define elf_backend_filter_implib_symbols	elf32_arm_filter_implib_symbols
#define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
#define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
#define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
#define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing

/* Backend capability and policy flags; REL (not RELA) relocations are
   the default for ARM.  */
#define elf_backend_can_refcount	1
#define elf_backend_can_gc_sections	1
#define elf_backend_plt_readonly	1
#define elf_backend_want_got_plt	1
#define elf_backend_want_plt_sym	0
#define elf_backend_want_dynrelro	1
#define elf_backend_may_use_rel_p	1
#define elf_backend_may_use_rela_p	0
#define elf_backend_default_use_rela_p	0
#define elf_backend_dtrel_excludes_plt	1
/* Size in bytes of the reserved words at the start of the GOT.  */
#define elf_backend_got_header_size	12
#define elf_backend_extern_protected_data 1

/* EABI build-attribute (.ARM.attributes) handling.  */
#undef	elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor		"aeabi"
#undef	elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section		".ARM.attributes"
#undef	elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
#undef	elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown	elf32_arm_obj_attrs_handle_unknown

/* SHF_ARM_PURECODE section flag support.  */
#undef	elf_backend_section_flags
#define elf_backend_section_flags		elf32_arm_section_flags
#undef	elf_backend_lookup_section_flags_hook
#define elf_backend_lookup_section_flags_hook	elf32_arm_lookup_section_flags

#define elf_backend_linux_prpsinfo32_ugid16	true

#include "elf32-target.h"
/* Native Client targets.  Override the target vector names before
   re-including elf32-target.h further below.  */
#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-nacl"
  17559. /* Like elf32_arm_link_hash_table_create -- but overrides
  17560. appropriately for NaCl. */
  17561. static struct bfd_link_hash_table *
  17562. elf32_arm_nacl_link_hash_table_create (bfd *abfd)
  17563. {
  17564. struct bfd_link_hash_table *ret;
  17565. ret = elf32_arm_link_hash_table_create (abfd);
  17566. if (ret)
  17567. {
  17568. struct elf32_arm_link_hash_table *htab
  17569. = (struct elf32_arm_link_hash_table *) ret;
  17570. htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
  17571. htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
  17572. }
  17573. return ret;
  17574. }
  17575. /* Since NaCl doesn't use the ARM-specific unwind format, we don't
  17576. really need to use elf32_arm_modify_segment_map. But we do it
  17577. anyway just to reduce gratuitous differences with the stock ARM backend. */
  17578. static bool
  17579. elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
  17580. {
  17581. return (elf32_arm_modify_segment_map (abfd, info)
  17582. && nacl_modify_segment_map (abfd, info));
  17583. }
  17584. static bool
  17585. elf32_arm_nacl_final_write_processing (bfd *abfd)
  17586. {
  17587. arm_final_write_processing (abfd);
  17588. return nacl_final_write_processing (abfd);
  17589. }
  17590. static bfd_vma
  17591. elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
  17592. const arelent *rel ATTRIBUTE_UNUSED)
  17593. {
  17594. return plt->vma
  17595. + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
  17596. i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
  17597. }
/* NaCl-specific backend overrides; elf32-target.h is re-included to
   emit the NaCl target vectors.  */
#undef	elf32_bed
#define elf32_bed				elf32_arm_nacl_bed
#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	\
  elf32_arm_nacl_link_hash_table_create
#undef	elf_backend_plt_alignment
#define elf_backend_plt_alignment		4
#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
#undef	elf_backend_modify_headers
#define elf_backend_modify_headers		nacl_modify_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
/* NaCl derives PLT symbol values via plt_sym_val rather than the
   generic synthetic-symtab hook.  */
#undef	bfd_elf32_get_synthetic_symtab
#undef	elf_backend_plt_sym_val
#define elf_backend_plt_sym_val			elf32_arm_nacl_plt_sym_val
#undef	elf_backend_copy_special_section_fields

#undef	ELF_MINPAGESIZE
#undef	ELF_COMMONPAGESIZE

#undef	ELF_TARGET_OS
#define ELF_TARGET_OS				is_nacl

#include "elf32-target.h"
/* Reset to defaults.  */
#undef	elf_backend_plt_alignment
#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#undef	elf_backend_modify_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#undef	ELF_MINPAGESIZE
#undef	ELF_COMMONPAGESIZE
#define ELF_COMMONPAGESIZE		0x1000

/* FDPIC Targets.  */
#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_fdpic_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-fdpic"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_fdpic_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-fdpic"
/* NOTE(review): presumably the non-default match priority keeps the
   FDPIC vectors from being preferred over the plain ARM vectors during
   target selection -- confirm against BFD's match-priority logic.  */
#undef	elf_match_priority
#define elf_match_priority		128
#undef	ELF_OSABI
#define ELF_OSABI			ELFOSABI_ARM_FDPIC
  17643. /* Like elf32_arm_link_hash_table_create -- but overrides
  17644. appropriately for FDPIC. */
  17645. static struct bfd_link_hash_table *
  17646. elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
  17647. {
  17648. struct bfd_link_hash_table *ret;
  17649. ret = elf32_arm_link_hash_table_create (abfd);
  17650. if (ret)
  17651. {
  17652. struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
  17653. htab->fdpic_p = 1;
  17654. }
  17655. return ret;
  17656. }
  17657. /* We need dynamic symbols for every section, since segments can
  17658. relocate independently. */
  17659. static bool
  17660. elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
  17661. struct bfd_link_info *info
  17662. ATTRIBUTE_UNUSED,
  17663. asection *p ATTRIBUTE_UNUSED)
  17664. {
  17665. switch (elf_section_data (p)->this_hdr.sh_type)
  17666. {
  17667. case SHT_PROGBITS:
  17668. case SHT_NOBITS:
  17669. /* If sh_type is yet undecided, assume it could be
  17670. SHT_PROGBITS/SHT_NOBITS. */
  17671. case SHT_NULL:
  17672. return false;
  17673. /* There shouldn't be section relative relocations
  17674. against any other section. */
  17675. default:
  17676. return true;
  17677. }
  17678. }
/* FDPIC-specific backend overrides; elf32-target.h is re-included to
   emit the FDPIC target vectors.  */
#undef	elf32_bed
#define elf32_bed				elf32_arm_fdpic_bed
#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_fdpic_link_hash_table_create
#undef	elf_backend_omit_section_dynsym
#define elf_backend_omit_section_dynsym		elf32_arm_fdpic_omit_section_dynsym
#undef	ELF_TARGET_OS

#include "elf32-target.h"

/* Undo the FDPIC-only overrides.  */
#undef	elf_match_priority
#undef	ELF_OSABI
#undef	elf_backend_omit_section_dynsym

/* VxWorks Targets.  */
#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-vxworks"
  17699. /* Like elf32_arm_link_hash_table_create -- but overrides
  17700. appropriately for VxWorks. */
  17701. static struct bfd_link_hash_table *
  17702. elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
  17703. {
  17704. struct bfd_link_hash_table *ret;
  17705. ret = elf32_arm_link_hash_table_create (abfd);
  17706. if (ret)
  17707. {
  17708. struct elf32_arm_link_hash_table *htab
  17709. = (struct elf32_arm_link_hash_table *) ret;
  17710. htab->use_rel = 0;
  17711. }
  17712. return ret;
  17713. }
  17714. static bool
  17715. elf32_arm_vxworks_final_write_processing (bfd *abfd)
  17716. {
  17717. arm_final_write_processing (abfd);
  17718. return elf_vxworks_final_write_processing (abfd);
  17719. }
/* VxWorks-specific backend overrides; elf32-target.h is re-included to
   emit the VxWorks target vectors.  */
#undef	elf32_bed
#define elf32_bed				elf32_arm_vxworks_bed
#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
#undef	elf_backend_emit_relocs
#define elf_backend_emit_relocs			elf_vxworks_emit_relocs

/* VxWorks uses RELA relocations and wants visible PLT symbols.  */
#undef	elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	0
#undef	elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	1
#undef	elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	1
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	1
#undef	ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x1000
#undef	ELF_TARGET_OS
#define ELF_TARGET_OS			is_vxworks

#include "elf32-target.h"
/* Merge backend specific data from an object file to the output
   object file when linking.  Returns FALSE (and reports errors) when
   IBFD's e_flags are incompatible with the flags already accumulated
   in INFO's output BFD; an interworking mismatch is only warned
   about.  */

static bool
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bool flags_compatible = true;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return false;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return true;

  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return false;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return false;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return true;

      elf_flags_init (obfd) = true;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return true;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return false;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return true;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bool null_input_bfd = true;
      bool only_data_sections = true;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_section_flags (sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = false;

	      /* NOTE(review): the loop stops at the first non-glue
		 section, so only_data_sections reflects that section
		 alone -- confirm that this early exit is intentional.  */
	      null_input_bfd = false;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return true;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return false;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      /* APCS-26 vs APCS-32 calling conventions cannot mix.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = false;
	}

      /* Float argument-passing convention must agree.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);
	  flags_compatible = false;
	}

      /* VFP vs FPA instruction sets cannot mix.  */
      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);
	  flags_compatible = false;
	}

      /* Maverick (Cirrus) co-processor usage must agree.  */
      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "Maverick", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB does not use %s instructions, whereas %pB does"),
	       ibfd, "Maverick", obfd);
	  flags_compatible = false;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);
	      flags_compatible = false;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}