elf32-spu.c
/* SPU specific support for 32-bit ELF

   Copyright (C) 2006-2022 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"

/* All users of this file have bfd_octets_per_byte (abfd, sec) == 1.  */
#define OCTETS_PER_BYTE(ABFD, SEC) 1

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
                                           void *, asection *,
                                           bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */
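/* For reference, the HOWTO macro's arguments are, in order: type,
   rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
   special_function, name, partial_inplace, src_mask, dst_mask,
   pcrel_offset.  */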
static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 3,  0, false,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_NONE",
         false, 0, 0x00000000, false),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, false, 14, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR10",
         false, 0, 0x00ffc000, false),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, false,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16",
         false, 0, 0x007fff80, false),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, false,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16_HI",
         false, 0, 0x007fff80, false),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, false,  7, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR16_LO",
         false, 0, 0x007fff80, false),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, false,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR18",
         false, 0, 0x01ffff80, false),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, false,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR32",
         false, 0, 0xffffffff, false),
  HOWTO (R_SPU_REL16,      2, 2, 16,  true,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_REL16",
         false, 0, 0x007fff80, true),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, false, 14, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR7",
         false, 0, 0x001fc000, false),
  HOWTO (R_SPU_REL9,       2, 2,  9,  true,  0, complain_overflow_signed,
         spu_elf_rel9, "SPU_REL9",
         false, 0, 0x0180007f, true),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  true,  0, complain_overflow_signed,
         spu_elf_rel9, "SPU_REL9I",
         false, 0, 0x0000c07f, true),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, false, 14, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR10I",
         false, 0, 0x00ffc000, false),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, false,  7, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR16I",
         false, 0, 0x007fff80, false),
  HOWTO (R_SPU_REL32,      0, 2, 32,  true,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_REL32",
         false, 0, 0xffffffff, true),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, false,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16X",
         false, 0, 0x007fff80, false),
  HOWTO (R_SPU_PPU32,      0, 2, 32, false,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU32",
         false, 0, 0xffffffff, false),
  HOWTO (R_SPU_PPU64,      0, 4, 64, false,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",
         false, 0, -1, false),
  HOWTO (R_SPU_ADD_PIC,    0, 0,  0, false,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADD_PIC",
         false, 0, 0x00000000, false),
};

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};

static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return (enum elf_spu_reloc_type) -1;
    case BFD_RELOC_NONE:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    case BFD_RELOC_SPU_ADD_PIC:
      return R_SPU_ADD_PIC;
    }
}

static bool
spu_elf_info_to_howto (bfd *abfd,
                       arelent *cache_ptr,
                       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  /* PR 17512: file: 90c2a92e.  */
  if (r_type >= R_SPU_max)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
                          abfd, r_type);
      bfd_set_error (bfd_error_bad_value);
      return false;
    }
  cache_ptr->howto = &elf_howto_table[(int) r_type];
  return true;
}

static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == (enum elf_spu_reloc_type) -1)
    return NULL;

  return elf_howto_table + r_type;
}

static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}

/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
              void *data, asection *input_section,
              bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
                                  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * OCTETS_PER_BYTE (abfd, input_section);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
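  /* VAL is now a word (4-byte) offset.  The 9-bit field holds signed
     values in [-256, 255], which is what the unsigned comparison below
     checks.  */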
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}

static bool
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
        return false;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}

/* Set up overlay info for executables.  */

static bool
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
        if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
          {
            unsigned int j;

            ++num_ovl;
            if (last_phdr == NULL
                || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
              ++num_buf;
            last_phdr = phdr;
            for (j = 1; j < elf_numsections (abfd); j++)
              {
                Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

                if (ELF_SECTION_SIZE (shdr, phdr) != 0
                    && ELF_SECTION_IN_SEGMENT (shdr, phdr))
                  {
                    asection *sec = shdr->bfd_section;
                    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
                    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
                  }
              }
          }
    }
  return true;
}

/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && startswith (sym->name, "_EAR_"))
    sym->flags |= BSF_KEEP;
}

/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section.  */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};

/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
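  /* ADDEND is used to match normal overlay stubs; BR_ADDR records the
     address of the branch instruction for soft-icache stubs (see
     count_stub and build_stub below).  */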
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  bfd_vma stub_addr;
};
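/* Get the SPU ELF linker hash table from a link_info structure.  */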
#define spu_hash_table(p) \
  ((is_elf_hash_table ((p)->hash)                                      \
    && elf_hash_table_id (elf_hash_table (p)) == SPU_ELF_DATA)         \
   ? (struct spu_link_hash_table *) (p)->hash : NULL)

struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};

struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};

static struct function_info *find_function (asection *, bfd_vma,
                                            struct bfd_link_info *);

/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_zmalloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
                                      _bfd_elf_link_hash_newfunc,
                                      sizeof (struct elf_link_hash_entry),
                                      SPU_ELF_DATA))
    {
      free (htab);
      return NULL;
    }

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}

void
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
{
  bfd_vma max_branch_log2;

  struct spu_link_hash_table *htab = spu_hash_table (info);
  htab->params = params;
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);

  /* For the software i-cache, we provide a "from" list whose size
     is a power-of-two number of quadwords, big enough to hold one
     byte per outgoing branch.  Compute this number here.  */
  max_branch_log2 = bfd_log2 (htab->params->max_branch);
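  /* A quadword is 16 bytes, so subtracting 4 from the log2 byte count
     converts it to a quadword count.  */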
  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
}

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bool
get_sym_h (struct elf_link_hash_entry **hp,
           Elf_Internal_Sym **symp,
           asection **symsecp,
           Elf_Internal_Sym **locsymsp,
           unsigned long r_symndx,
           bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
        *hp = h;

      if (symp != NULL)
        *symp = NULL;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            symsec = h->root.u.def.section;
          *symsecp = symsec;
        }
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
        {
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          if (locsyms == NULL)
            locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
                                            symtab_hdr->sh_info,
                                            0, NULL, NULL, NULL);
          if (locsyms == NULL)
            return false;
          *locsymsp = locsyms;
        }
      sym = locsyms + r_symndx;

      if (hp != NULL)
        *hp = NULL;

      if (symp != NULL)
        *symp = sym;

      if (symsecp != NULL)
        *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return true;
}

/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bool
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      /* This should really be SEC_LINKER_CREATED, but then we'd need
         to write out the section ourselves.  */
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
          || !bfd_set_section_alignment (s, 4))
        return false;
      /* Because we didn't set SEC_LINKER_CREATED we need to set the
         proper section type.  */
      elf_section_type (s) = SHT_NOTE;
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
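      /* ELF note layout: a 12-byte header (namesz, descsz, type)
         followed by the NUL-terminated name and then the descriptor,
         each padded to a 4-byte boundary.  Here the note name is
         SPU_PLUGIN_NAME and the descriptor is the output file name.  */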
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (s, size))
        return false;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
        return false;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      asection *s;
      flagword flags;

      if (htab->elf.dynobj == NULL)
        htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
               | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (s, 2))
        return false;
      htab->sfixup = s;
    }

  return true;
}

/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}

/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */

int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              asection *s0 = alloc_sec[i - 1];
              vma_start = s0->vma;
              ovl_end = (s0->vma
                         + ((bfd_vma) 1
                            << (htab->num_lines_log2
                                + htab->line_size_log2)));
              --i;
              break;
            }
          else
            ovl_end = s->vma + s->size;
        }

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma >= ovl_end)
            break;

          /* A section in an overlay area called .ovl.init is not
             an overlay, in the sense that it might be loaded in
             by the overlay manager, but rather the initial
             section contents for the overlay buffer.  */
          if (!startswith (s->name, ".ovl.init"))
            {
              num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
              set_id = (num_buf == prev_buf)? set_id + 1 : 0;
              prev_buf = num_buf;

              if ((s->vma - vma_start) & (htab->params->line_size - 1))
                {
                  info->callbacks->einfo (_("%X%P: overlay section %pA "
                                            "does not start on a cache line\n"),
                                          s);
                  bfd_set_error (bfd_error_bad_value);
                  return 0;
                }
              else if (s->size > htab->params->line_size)
                {
                  info->callbacks->einfo (_("%X%P: overlay section %pA "
                                            "is larger than a cache line\n"),
                                          s);
                  bfd_set_error (bfd_error_bad_value);
                  return 0;
                }

              alloc_sec[ovl_index++] = s;
              spu_elf_section_data (s)->u.o.ovl_index
                = (set_id << htab->num_lines_log2) + num_buf;
              spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
            }
        }

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              info->callbacks->einfo (_("%X%P: overlay section %pA "
                                        "is not in cache area\n"),
                                      alloc_sec[i-1]);
              bfd_set_error (bfd_error_bad_value);
              return 0;
            }
          else
            ovl_end = s->vma + s->size;
        }
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
         Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              asection *s0 = alloc_sec[i - 1];

              if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
                {
                  ++num_buf;
                  if (!startswith (s0->name, ".ovl.init"))
                    {
                      alloc_sec[ovl_index] = s0;
                      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
                      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
                    }
                  else
                    ovl_end = s->vma + s->size;
                }
              if (!startswith (s->name, ".ovl.init"))
                {
                  alloc_sec[ovl_index] = s;
                  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
                  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
                  if (s0->vma != s->vma)
                    {
                      /* xgettext:c-format */
                      info->callbacks->einfo (_("%X%P: overlay sections %pA "
                                                "and %pA do not start at the "
                                                "same address\n"),
                                              s0, s);
                      bfd_set_error (bfd_error_bad_value);
                      return 0;
                    }
                  if (ovl_end < s->vma + s->size)
                    ovl_end = s->vma + s->size;
                }
            }
          else
            ovl_end = s->vma + s->size;
        }
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, true, false, false);
      if (h == NULL)
        return 0;

      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_undefined;
          h->ref_regular = 1;
          h->ref_regular_nonweak = 1;
          h->non_elf = 0;
        }
      htab->ovly_entry[i] = h;
    }

  return 2;
}

/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

#define BRA     0x30000000
#define BRASL   0x31000000
#define BR      0x32000000
#define BRSL    0x33000000
#define NOP     0x40200000
#define LNOP    0x00200000
#define ILA     0x42000000

/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bool
is_branch (const unsigned char *insn)
{
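  /* The 0xec mask fixes the opcode byte to 001x00xx: the free 0x10 bit
     distinguishes the bra/brasl/br/brsl family from the conditional
     brz family, and the free low two bits select the variant, so this
     matches exactly the eight opcodes listed above.  The insn[1] test
     checks that the ninth opcode bit is zero.  */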
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bool
is_indirect_branch (const unsigned char *insn)
{
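  /* Similarly, the 0xef mask admits only opcode bytes 00110101 and
     00100101.  The insn[1] test checks the ninth opcode bit is zero;
     the two bits after it, which select among bi, bisl, iret, etc.,
     are left free.  */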
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra 0001000..
   hbrr 0001001..  */

static bool
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}

/* True if INPUT_SECTION might need overlay stubs.  */

static bool
maybe_needs_stubs (asection *input_section)
{
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)
    return false;

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == bfd_abs_section_ptr)
    return false;

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)
    return false;

  return true;
}

enum _stub_type
{
  no_stub,
  call_ovl_stub,
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,
  stub_error
};

/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
                Elf_Internal_Sym *sym,
                asection *sym_sec,
                asection *input_section,
                Elf_Internal_Rela *irela,
                bfd_byte *contents,
                struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bool branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
        return ret;

      /* setjmp always goes via an overlay stub, because then the return
         and hence the longjmp goes via __ovly_return.  That magically
         makes setjmp/longjmp between overlays work.  */
      if (startswith (h->root.root.string, "setjmp")
          && (h->root.root.string[6] == '\0'
              || h->root.root.string[6] == '@'))
        ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = false;
  hint = false;
  call = false;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
        {
          contents = insn;
          if (!bfd_get_section_contents (input_section->owner,
                                         input_section,
                                         contents,
                                         irela->r_offset, 4))
            return stub_error;
        }
      else
        contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
        {
          call = (contents[0] & 0xfd) == 0x31;
          if (call
              && sym_type != STT_FUNC
              && contents != insn)
            {
              /* It's common for people to write assembly and forget
                 to give function symbols the right type.  Handle
                 calls to such symbols, but warn so that (hopefully)
                 people will fix their code.  We need the symbol
                 type to be correct to distinguish function pointer
                 initialisation from other pointer initialisations.  */
              const char *sym_name;

              if (h != NULL)
                sym_name = h->root.root.string;
              else
                {
                  Elf_Internal_Shdr *symtab_hdr;
                  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
                  sym_name = bfd_elf_sym_name (input_section->owner,
                                               symtab_hdr,
                                               sym,
                                               sym_sec);
                }
              _bfd_error_handler
                /* xgettext:c-format */
                (_("warning: call to non-function symbol %s defined in %pB"),
                 sym_name, sym_sec->owner);
            }
        }
    }

  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
          && !(branch || hint)
          && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
        lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
        ret = call_ovl_stub;
      else
        ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}

static bool
count_stub (struct spu_link_hash_table *htab,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
        {
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
                               * sizeof (*elf_local_got_ents (ibfd)));
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
          if (elf_local_got_ents (ibfd) == NULL)
            return false;
        }
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return true;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && g->ovl == 0)
          break;

      if (g == NULL)
        {
          /* Need a new non-overlay area stub.  Zap other stubs.  */
          for (g = *head; g != NULL; g = gnext)
            {
              gnext = g->next;
              if (g->addend == addend)
                {
                  htab->stub_count[g->ovl] -= 1;
                  free (g);
                }
            }
        }
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return false;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return true;
}

/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.
   Soft-icache stubs are four or eight words.  */

static unsigned int
ovl_stub_size (struct spu_elf_params *params)
{
  1010. return 16 << params->ovly_flavour >> params->compact_stub;
  1011. }
  1012. static unsigned int
  1013. ovl_stub_size_log2 (struct spu_elf_params *params)
  1014. {
  1015. return 4 + params->ovly_flavour - params->compact_stub;
  1016. }
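/* For reference, the sizes the two helpers above compute, assuming the
   enum values ovly_normal == 0 and ovly_soft_icache == 1 that the shift
   expressions rely on:

                        full    compact
     ovly_normal          16       8    bytes
     ovly_soft_icache     32      16    bytes

   which matches the comment above: four or two instructions for normal
   overlays, eight or four words for soft-icache.  */
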
/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load

   Software icache stubs are:

   .word target_index
   .word target_ia;
   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler
   .quad xor_pattern
*/

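/* A worked example of the packed word in the two-instruction stub (an
   illustration, not taken from a real link): for a destination at local
   store address 0x12345 in overlay 3, build_stub below emits

     (0x12345 & 0x3ffff) | (3 << 18) == 0x000d2345

   i.e. the overlay number lands in the top 14 bits and the address in
   the low 18, as described above.  */
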
static bool
build_stub (struct bfd_link_info *info,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela,
            bfd_vma dest,
            asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return false;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
        g->br_addr = (irela->r_offset
                      + isec->output_offset
                      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
      if (g == NULL)
        abort ();

      if (g->ovl == 0 && ovl != 0)
        return true;

      if (g->stub_addr != (bfd_vma) -1)
        return true;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
        + htab->ovly_entry[0]->root.u.def.section->output_offset
        + htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return false;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
                  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
        bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
                    sec->contents + sec->size + 12);
      else
        bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
                    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
           && htab->params->compact_stub)
    {
      if (!BRA_STUBS)
        bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
                    sec->contents + sec->size);
      else
        bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
                    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
                  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
           && htab->params->compact_stub)
    {
      lrlive = 0;
      if (stub_type == nonovl_stub)
        ;
      else if (stub_type == call_ovl_stub)
        /* A brsl makes lr live and *(*sp+16) is live.
           Tail calls have the same liveness.  */
        lrlive = 5;
      else if (!htab->params->lrlive_analysis)
        /* Assume stack frame and lr save.  */
        lrlive = 1;
      else if (irela != NULL)
        {
          /* Analyse branch instructions.  */
          struct function_info *caller;
          bfd_vma off;

          caller = find_function (isec, irela->r_offset, info);
          if (caller->start == NULL)
            off = irela->r_offset;
          else
            {
              struct function_info *found = NULL;

              /* Find the earliest piece of this function that
                 has frame adjusting instructions.  We might
                 see dynamic frame adjustment (eg. for alloca)
                 in some later piece, but functions using
                 alloca always set up a frame earlier.  Frame
                 setup instructions are always in one piece.  */
              if (caller->lr_store != (bfd_vma) -1
                  || caller->sp_adjust != (bfd_vma) -1)
                found = caller;
              while (caller->start != NULL)
                {
                  caller = caller->start;
                  if (caller->lr_store != (bfd_vma) -1
                      || caller->sp_adjust != (bfd_vma) -1)
                    found = caller;
                }
              if (found != NULL)
                caller = found;
              off = (bfd_vma) -1;
            }

          if (off > caller->sp_adjust)
            {
              if (off > caller->lr_store)
                /* Only *(*sp+16) is live.  */
                lrlive = 1;
              else
                /* If no lr save, then we must be in a
                   leaf function with a frame.
                   lr is still live.  */
                lrlive = 4;
            }
          else if (off > caller->lr_store)
            {
              /* Between lr save and stack adjust.  */
              lrlive = 3;
              /* This should never happen since prologues won't
                 be split here.  */
              BFD_ASSERT (0);
            }
          else
            /* On entry to function.  */
            lrlive = 5;

          if (stub_type != br000_ovl_stub
              && lrlive != stub_type - br000_ovl_stub)
            /* xgettext:c-format */
            info->callbacks->einfo (_("%pA:0x%v lrlive .brinfo (%u) differs "
                                      "from analysis (%u)\n"),
                                    isec, irela->r_offset, lrlive,
                                    stub_type - br000_ovl_stub);
        }

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
        lrlive = stub_type - br000_ovl_stub;

      if (ovl == 0)
        to = (htab->ovly_entry[1]->root.u.def.value
              + htab->ovly_entry[1]->root.u.def.section->output_offset
              + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
         set up an xor pattern that can be used by the icache manager
         to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
        {
          /* Except in the case of _SPUEAR_ stubs, the branch in
             question is the one in the stub itself.  */
          BFD_ASSERT (stub_type == nonovl_stub);
          g->br_addr = g->stub_addr;
          br_dest = to;
        }

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
                  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
        patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
                  sec->contents + sec->size + 12);

      if (ovl == 0)
        /* Extra space for linked list entries.  */
        sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
        len += strlen (h->root.root.string);
      else
        len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
        add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
        len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
        return false;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
        strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
        sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
                 dest_sec->id & 0xffffffff,
                 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
        sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, true, true, false);
      free (name);
      if (h == NULL)
        return false;
      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = sec;
          h->size = ovl_stub_size (htab->params);
          h->root.u.def.value = sec->size - h->size;
          h->type = STT_FUNC;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
          h->non_elf = 0;
        }
    }

  return true;
}

/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bool
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && startswith (h->root.root.string, "_SPUEAR_")
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->params->non_overlay_stubs))
    {
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
    }

  return true;
}

static bool
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && startswith (h->root.root.string, "_SPUEAR_")
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->params->non_overlay_stubs))
    {
      return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
                         h->root.u.def.value, sym_sec);
    }

  return true;
}

/* Size or build stubs.  */

static bool
process_stubs (struct bfd_link_info *info, bool build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      if (ibfd->xvec != &spu_elf32_vec)
        continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
        continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
        {
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

          /* If there aren't any relocs, then there's nothing more to do.  */
          if ((isec->flags & SEC_RELOC) == 0
              || isec->reloc_count == 0)
            continue;

          if (!maybe_needs_stubs (isec))
            continue;

          /* Get the relocs.  */
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
                                                       info->keep_memory);
          if (internal_relocs == NULL)
            goto error_ret_free_local;

          /* Now examine each relocation.  */
          irela = internal_relocs;
          irelaend = irela + isec->reloc_count;
          for (; irela < irelaend; irela++)
            {
              enum elf_spu_reloc_type r_type;
              unsigned int r_indx;
              asection *sym_sec;
              Elf_Internal_Sym *sym;
              struct elf_link_hash_entry *h;
              enum _stub_type stub_type;

              r_type = ELF32_R_TYPE (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);

              if (r_type >= R_SPU_max)
                {
                  bfd_set_error (bfd_error_bad_value);
                error_ret_free_internal:
                  if (elf_section_data (isec)->relocs != internal_relocs)
                    free (internal_relocs);
                error_ret_free_local:
                  if (symtab_hdr->contents != (unsigned char *) local_syms)
                    free (local_syms);
                  return false;
                }

              /* Determine the reloc target section.  */
              if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
                goto error_ret_free_internal;

              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
                                          NULL, info);
              if (stub_type == no_stub)
                continue;
              else if (stub_type == stub_error)
                goto error_ret_free_internal;

              if (htab->stub_count == NULL)
                {
                  bfd_size_type amt;
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
                  htab->stub_count = bfd_zmalloc (amt);
                  if (htab->stub_count == NULL)
                    goto error_ret_free_internal;
                }

              if (!build)
                {
                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
                    goto error_ret_free_internal;
                }
              else
                {
                  bfd_vma dest;

                  if (h != NULL)
                    dest = h->root.u.def.value;
                  else
                    dest = sym->st_value;
                  dest += irela->r_addend;
                  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
                                   dest, sym_sec))
                    goto error_ret_free_internal;
                }
            }

          /* We're done with the internal relocs, free them.  */
          if (elf_section_data (isec)->relocs != internal_relocs)
            free (internal_relocs);
        }

      if (local_syms != NULL
          && symtab_hdr->contents != (unsigned char *) local_syms)
        {
          if (!info->keep_memory)
            free (local_syms);
          else
            symtab_hdr->contents = (unsigned char *) local_syms;
        }
    }

  return true;
}

/* Allocate space for overlay call and return stubs.
   Return 0 on error, 1 if no overlays, 2 otherwise.  */

int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  if (!process_stubs (info, false))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
        return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
               | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
          || !bfd_set_section_alignment (stub,
                                         ovl_stub_size_log2 (htab->params)))
        return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
        /* Extra space for linked list entries.  */
        stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
        {
          asection *osec = htab->ovl_sec[i];
          unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
          stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
          htab->stub_sec[ovl] = stub;
          if (stub == NULL
              || !bfd_set_section_alignment (stub,
                                             ovl_stub_size_log2 (htab->params)))
            return 0;
          stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
        }
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
         a) Tag array, one quadword per cache line.
         b) Rewrite "to" list, one quadword per cache line.
         c) Rewrite "from" list, one byte per outgoing branch (rounded up to
            a power-of-two number of full quadwords) per cache line.  */
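      /* Concretely, the size set below is, per cache line, 16 bytes for
         (a), 16 bytes for (b), and 16 << fromelem_size_log2 bytes for
         (c), all scaled by the number of lines, 1 << num_lines_log2.  */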
      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
          || !bfd_set_section_alignment (htab->ovtab, 4))
        return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
                          << htab->num_lines_log2;

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
          || !bfd_set_section_alignment (htab->init, 4))
        return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
         .  struct {
         .    u32 vma;
         .    u32 size;
         .    u32 file_off;
         .    u32 buf;
         .  } _ovly_table[];
         .
         .  struct {
         .    u32 mapped;
         .  } _ovly_buf_table[];
         .  */
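      /* Note that _ovly_table proper starts 16 bytes into the section:
         the first 16 bytes form a reserved entry for the non-overlay
         area (spu_elf_build_stubs sets the low bit of its size field to
         mark it present), hence the "+ 16" in the size below.  */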
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
          || !bfd_set_section_alignment (htab->ovtab, 4))
        return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}

/* Called from ld to place overlay manager data sections.  This is done
   after the overlay manager itself is loaded, mainly so that the
   linker's htab->init section is placed after any other .ovl.init
   sections.  */

void
spu_elf_place_overlay_data (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int i;

  if (htab->stub_sec != NULL)
    {
      (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");

      for (i = 0; i < htab->num_overlays; ++i)
        {
          asection *osec = htab->ovl_sec[i];
          unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
          (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
        }
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");

  if (htab->ovtab != NULL)
    {
      const char *ovout = ".data";
      if (htab->params->ovly_flavour == ovly_soft_icache)
        ovout = ".bss";
      (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
    }

  if (htab->toe != NULL)
    (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
}

/* Functions to handle embedded spu_ovl.o object.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}

static file_ptr
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
               void *stream,
               void *buf,
               file_ptr nbytes,
               file_ptr offset)
{
  struct _ovl_stream *os;
  size_t count;
  size_t max;

  os = (struct _ovl_stream *) stream;
  max = (const char *) os->end - (const char *) os->start;

  if ((ufile_ptr) offset >= max)
    return 0;

  count = nbytes;
  if (count > max - offset)
    count = max - offset;

  memcpy (buf, (const char *) os->start + offset, count);
  return count;
}

static int
ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
              void *stream,
              struct stat *sb)
{
  struct _ovl_stream *os = (struct _ovl_stream *) stream;

  memset (sb, 0, sizeof (*sb));
  sb->st_size = (const char *) os->end - (const char *) os->start;
  return 0;
}

bool
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
                              "elf32-spu",
                              ovl_mgr_open,
                              (void *) stream,
                              ovl_mgr_pread,
                              NULL,
                              ovl_mgr_stat);
  return *ovl_bfd != NULL;
}

static unsigned int
overlay_index (asection *sec)
{
  if (sec == NULL
      || sec->output_section == bfd_abs_section_ptr)
    return 0;
  return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
}

/* Define an STT_OBJECT symbol.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, true, false, false);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else if (h->root.u.def.section->owner != NULL)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB is not allowed to define %s"),
                          h->root.u.def.section->owner,
                          h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }
  else
    {
      _bfd_error_handler (_("you are not allowed to define %s in a script"),
                          h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}

/* Fill in all stubs and the overlay tables.  */

static bool
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  if (htab->num_overlays != 0)
    {
      for (i = 0; i < 2; i++)
        {
          h = htab->ovly_entry[i];
          if (h != NULL
              && (h->root.type == bfd_link_hash_defined
                  || h->root.type == bfd_link_hash_defweak)
              && h->def_regular)
            {
              s = h->root.u.def.section->output_section;
              if (spu_elf_section_data (s)->u.o.ovl_index)
                {
                  _bfd_error_handler (_("%s in overlay section"),
                                      h->root.root.string);
                  bfd_set_error (bfd_error_bad_value);
                  return false;
                }
            }
        }
    }

  if (htab->stub_sec != NULL)
    {
      for (i = 0; i <= htab->num_overlays; i++)
        if (htab->stub_sec[i]->size != 0)
          {
            htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
                                                      htab->stub_sec[i]->size);
            if (htab->stub_sec[i]->contents == NULL)
              return false;
            htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
            htab->stub_sec[i]->size = 0;
          }

      /* Fill in all the stubs.  */
      process_stubs (info, true);
      if (!htab->stub_err)
        elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

      if (htab->stub_err)
        {
          _bfd_error_handler (_("overlay stub relocation overflow"));
          bfd_set_error (bfd_error_bad_value);
          return false;
        }

      for (i = 0; i <= htab->num_overlays; i++)
        {
          if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
            {
              _bfd_error_handler (_("stubs don't match calculated size"));
              bfd_set_error (bfd_error_bad_value);
              return false;
            }
          htab->stub_sec[i]->rawsize = 0;
        }
    }

  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return true;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return false;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      bfd_vma off;

      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
        return false;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
        return false;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
        return false;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
        return false;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
        return false;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
        return false;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
                                   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
        return false;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
        return false;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
        return false;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
        return false;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
        return false;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
        return false;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
        return false;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
        return false;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
        {
          htab->init->contents = bfd_zalloc (htab->init->owner,
                                             htab->init->size);
          if (htab->init->contents == NULL)
            return false;

          h = define_ovtab_symbol (htab, "__icache_fileoff");
          if (h == NULL)
            return false;
          h->root.u.def.value = 0;
          h->root.u.def.section = htab->init;
          h->size = 8;
        }
    }
  else
    {
      /* Write out _ovly_table.  */
      /* Set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
        {
          unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

          if (ovl_index != 0)
            {
              unsigned long off = ovl_index * 16;
              unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

              bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
              bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
                          p + off + 4);
              /* file_off written later in spu_elf_modify_headers.  */
              bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
            }
        }

      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
        return false;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
        return false;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = 0;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
        return false;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
        return false;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
      h->size = 0;
    }

  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return false;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return true;
}

/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */

asection *
spu_elf_check_vma (struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  unsigned int i;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *abfd = info->output_bfd;
  bfd_vma hi = htab->params->local_store_hi;
  bfd_vma lo = htab->params->local_store_lo;

  htab->local_store = hi + 1 - lo;

  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
        if (m->sections[i]->size != 0
            && (m->sections[i]->vma < lo
                || m->sections[i]->vma > hi
                || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
          return m->sections[i];

  return NULL;
}

/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.
   If a store of lr is found save the instruction offset to *LR_STORE.
   If a stack adjusting instruction is found, save that offset to
   *SP_ADJUST.  */

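/* For illustration only (this sequence is not taken from this file), a
   typical prologue the scanner below recognizes:

     stqd $lr,16($sp)    # lr save, offset recorded in *LR_STORE
     ai   $sp,$sp,-80    # frame allocation, offset recorded in *SP_ADJUST

   for which find_function_stack_adjust returns -80.  */
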
static int
find_function_stack_adjust (asection *sec,
                            bfd_vma offset,
                            bfd_vma *lr_store,
                            bfd_vma *sp_adjust)
{
  int32_t reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      uint32_t imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
        break;

      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
        {
          if (rt == 0 /* lr */ && ra == 1 /* sp */)
            *lr_store = offset;
          continue;
        }

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
      if (buf[0] == 0x1c /* ai */)
        {
          imm >>= 7;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] + imm;
          if (rt == 1 /* sp */)
            {
              if (reg[rt] > 0)
                break;
              *sp_adjust = offset;
              return reg[rt];
            }
        }
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
        {
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

          reg[rt] = reg[ra] + reg[rb];
          if (rt == 1)
            {
              if (reg[rt] > 0)
                break;
              *sp_adjust = offset;
              return reg[rt];
            }
        }
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
        {
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

          reg[rt] = reg[rb] - reg[ra];
          if (rt == 1)
            {
              if (reg[rt] > 0)
                break;
              *sp_adjust = offset;
              return reg[rt];
            }
        }
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
        {
          if (buf[0] >= 0x42 /* ila */)
            imm |= (buf[0] & 1) << 17;
          else
            {
              imm &= 0xffff;
              if (buf[0] == 0x40 /* il */)
                {
                  if ((buf[1] & 0x80) == 0)
                    continue;
                  imm = (imm ^ 0x8000) - 0x8000;
                }
              else if ((buf[1] & 0x80) == 0 /* ilhu */)
                imm <<= 16;
            }
          reg[rt] = imm;
          continue;
        }
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
        {
          reg[rt] |= imm & 0xffff;
          continue;
        }
      else if (buf[0] == 0x04 /* ori */)
        {
          imm >>= 7;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] | imm;
          continue;
        }
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
        {
          reg[rt] = (((imm & 0x8000) ? 0xff000000 : 0)
                     | ((imm & 0x4000) ? 0x00ff0000 : 0)
                     | ((imm & 0x2000) ? 0x0000ff00 : 0)
                     | ((imm & 0x1000) ? 0x000000ff : 0));
          continue;
        }
      else if (buf[0] == 0x16 /* andbi */)
        {
          imm >>= 7;
          imm &= 0xff;
          imm |= imm << 8;
          imm |= imm << 16;
          reg[rt] = reg[ra] & imm;
          continue;
        }
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
        {
          /* Used in pic reg load.  Say rt is trashed.  Won't be used
             in stack adjust, but we need to continue past this branch.  */
          reg[rt] = 0;
          continue;
        }
      else if (is_branch (buf) || is_indirect_branch (buf))
        /* If we hit a branch then we must be out of the prologue.  */
        break;
    }

  return 0;
}

/* qsort predicate to sort symbols by section and value.  */

static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;

static int
sort_syms (const void *a, const void *b)
{
  Elf_Internal_Sym *const *s1 = a;
  Elf_Internal_Sym *const *s2 = b;
  asection *sec1, *sec2;
  bfd_signed_vma delta;

  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];

  if (sec1 != sec2)
    return sec1->index - sec2->index;

  delta = (*s1)->st_value - (*s2)->st_value;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  delta = (*s2)->st_size - (*s1)->st_size;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return *s1 < *s2 ? -1 : 1;
}

/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
   entries for section SEC.  */

static struct spu_elf_stack_info *
alloc_stack_info (asection *sec, int max_fun)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  bfd_size_type amt;

  amt = sizeof (struct spu_elf_stack_info);
  amt += (max_fun - 1) * sizeof (struct function_info);
  sec_data->u.i.stack_info = bfd_zmalloc (amt);
  if (sec_data->u.i.stack_info != NULL)
    sec_data->u.i.stack_info->max_fun = max_fun;
  return sec_data->u.i.stack_info;
}

/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.  */

static struct function_info *
maybe_insert_function (asection *sec,
                       void *sym_h,
                       bool global,
                       bool is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
        return NULL;
    }

  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
         info.  */
      if (sinfo->fun[i].lo == off)
        {
          /* Prefer globals over local syms.  */
          if (global && !sinfo->fun[i].global)
            {
              sinfo->fun[i].global = true;
              sinfo->fun[i].u.h = sym_h;
            }
          if (is_func)
            sinfo->fun[i].is_func = true;
          return &sinfo->fun[i];
        }
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
        return &sinfo->fun[i];
    }

  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
        return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
             (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
                                                     &sinfo->fun[i].lr_store,
                                                     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}

/* Return the name of FUN.  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
        return "(null)";
      sprintf (name, "%s+%lx", sec->name,
               (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}

/* Read the instruction at OFF in SEC.  Return true iff the instruction
   is a nop, lnop, or stop 0 (all zero insn).  */

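/* The masks below match the SPU encodings nop == 0x40200000 and
   lnop == 0x00200000 (bits below the opcode are don't-cares for this
   purpose), while the all-zero word is "stop 0".  */
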
static bool
is_nop (asection *sec, bfd_vma off)
{
  unsigned char insn[4];

  if (off + 4 > sec->size
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
    return false;
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
    return true;
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
    return true;
  return false;
}

/* Extend the range of FUN to cover nop padding up to LIMIT.
   Return TRUE iff some instruction other than a NOP was found.  */

static bool
insns_at_end (struct function_info *fun, bfd_vma limit)
{
  bfd_vma off = (fun->hi + 3) & -4;

  while (off < limit && is_nop (fun->sec, off))
    off += 4;
  if (off < limit)
    {
      fun->hi = off;
      return true;
    }
  fun->hi = limit;
  return false;
}

/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC.  */

static bool
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bool gaps = false;

  if (sinfo == NULL)
    return false;

  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
        /* Fix overlapping symbols.  */
        const char *f1 = func_name (&sinfo->fun[i - 1]);
        const char *f2 = func_name (&sinfo->fun[i]);

        /* xgettext:c-format */
        info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
        sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = true;

  if (sinfo->num_fun == 0)
    gaps = true;
  else
    {
      if (sinfo->fun[0].lo != 0)
        gaps = true;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
        {
          const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

          info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
          sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
        }
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
        gaps = true;
    }
  return gaps;
}

/* Search current function info for a function that contains address
   OFFSET in section SEC.  */

static struct function_info *
find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int lo, hi, mid;

  lo = 0;
  hi = sinfo->num_fun;
  while (lo < hi)
    {
      mid = (lo + hi) / 2;
      if (offset < sinfo->fun[mid].lo)
        hi = mid;
      else if (offset >= sinfo->fun[mid].hi)
        lo = mid + 1;
      else
        return &sinfo->fun[mid];
    }
  /* xgettext:c-format */
  info->callbacks->einfo (_("%pA:0x%v not found in function table\n"),
                          sec, offset);
  bfd_set_error (bfd_error_bad_value);
  return NULL;
}

/* Add CALLEE to CALLER call list if not already present.  Return TRUE
   if CALLEE was new.  If this function returns FALSE, CALLEE should
   be freed.  */

static bool
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
        /* Tail calls use less stack than normal calls.  Retain entry
           for normal call over one for tail call.  */
        p->is_tail &= callee->is_tail;
        if (!p->is_tail)
          {
            p->fun->start = NULL;
            p->fun->is_func = true;
          }
        p->count += callee->count;
        /* Reorder list so most recent call is first.  */
        *pp = p->next;
        p->next = caller->call_list;
        caller->call_list = p;
        return false;
      }
  callee->next = caller->call_list;
  caller->call_list = callee;
  return true;
}

/* Copy CALL and insert the copy into CALLER.  */

static bool
copy_callee (struct function_info *caller, const struct call_info *call)
{
  struct call_info *callee;

  callee = bfd_malloc (sizeof (*callee));
  if (callee == NULL)
    return false;
  *callee = *call;
  if (!insert_callee (caller, callee))
    free (callee);
  return true;
}

/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
   overlay stub sections.  */

static bool
interesting_section (asection *s)
{
  return (s->output_section != bfd_abs_section_ptr
          && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
              == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
          && s->size != 0);
}

/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */

static bool
mark_functions_via_relocs (asection *sec,
                           struct bfd_link_info *info,
                           int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  static bool warned;

  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return true;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
                                               info->keep_memory);
  if (internal_relocs == NULL)
    return false;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bool nonbranch, is_call;
      struct function_info *caller;
      struct call_info *callee;

      r_type = ELF32_R_TYPE (irela->r_info);
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
        return false;

      if (sym_sec == NULL
          || sym_sec->output_section == bfd_abs_section_ptr)
        continue;

      is_call = false;
      if (!nonbranch)
        {
          unsigned char insn[4];

          if (!bfd_get_section_contents (sec->owner, sec, insn,
                                         irela->r_offset, 4))
            return false;
          if (is_branch (insn))
            {
              is_call = (insn[0] & 0xfd) == 0x31;
              priority = insn[1] & 0x0f;
              priority <<= 8;
              priority |= insn[2];
              priority <<= 8;
              priority |= insn[3];
              priority >>= 7;
              if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
                  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
                {
                  if (!warned)
                    info->callbacks->einfo
                      /* xgettext:c-format */
                      (_("%pB(%pA+0x%v): call to non-code section"
                         " %pB(%pA), analysis incomplete\n"),
                       sec->owner, sec, irela->r_offset,
                       sym_sec->owner, sym_sec);
                  warned = true;
                  continue;
                }
            }
          else
            {
              nonbranch = true;
              if (is_hint (insn))
                continue;
            }
        }

      if (nonbranch)
        {
          /* For --auto-overlay, count possible stubs we need for
             function pointer references.  */
          unsigned int sym_type;
          if (h)
            sym_type = h->type;
          else
            sym_type = ELF_ST_TYPE (sym->st_info);
          if (sym_type == STT_FUNC)
            {
              if (call_tree && spu_hash_table (info)->params->auto_overlay)
                spu_hash_table (info)->non_ovly_stub += 1;
              /* If the symbol type is STT_FUNC then this must be a
                 function pointer initialisation.  */
              continue;
            }
          /* Ignore data references.  */
          if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
              != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
            continue;
          /* Otherwise we probably have a jump table reloc for
             a switch statement or some other reference to a
             code label.  */
        }

      if (h)
        val = h->root.u.def.value;
      else
        val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
        {
          struct function_info *fun;

          if (irela->r_addend != 0)
            {
              Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
              if (fake == NULL)
                return false;
              fake->st_value = val;
              fake->st_shndx
                = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
              sym = fake;
            }
          if (sym)
            fun = maybe_insert_function (sym_sec, sym, false, is_call);
          else
            fun = maybe_insert_function (sym_sec, h, true, is_call);
          if (fun == NULL)
            return false;
          if (irela->r_addend != 0
              && fun->u.sym != sym)
            free (sym);
          continue;
        }

      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
        return false;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
        return false;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
        return false;
      callee->is_tail = !is_call;
      callee->is_pasted = false;
      callee->broken_cycle = false;
      callee->priority = priority;
      callee->count = nonbranch ? 0 : 1;
      if (callee->fun->last_caller != sec)
        {
          callee->fun->last_caller = sec;
          callee->fun->call_count += 1;
        }
      if (!insert_callee (caller, callee))
        free (callee);
      else if (!is_call
               && !callee->fun->is_func
               && callee->fun->stack == 0)
        {
          /* This is either a tail call or a branch from one part of
             the function to another, ie. hot/cold section.  If the
             destination has been called by some other function then
             it is a separate function.  We also assume that functions
             are not split across input files.  */
          if (sec->owner != sym_sec->owner)
            {
              callee->fun->start = NULL;
              callee->fun->is_func = true;
            }
          else if (callee->fun->start == NULL)
            {
              struct function_info *caller_start = caller;
              while (caller_start->start)
                caller_start = caller_start->start;
              if (caller_start != callee->fun)
                callee->fun->start = caller_start;
            }
          else
            {
              struct function_info *callee_start;
              struct function_info *caller_start;
              callee_start = callee->fun;
              while (callee_start->start)
                callee_start = callee_start->start;
              caller_start = caller;
              while (caller_start->start)
                caller_start = caller_start->start;
              if (caller_start != callee_start)
                {
                  callee->fun->start = NULL;
                  callee->fun->is_func = true;
                }
            }
        }
    }

  return true;
}

/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.  */

static bool
pasted_function (asection *sec)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return false;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, false, false);
  if (!fun)
    return false;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
        {
          if (fun_start != NULL)
            {
              struct call_info *callee = bfd_malloc (sizeof *callee);
              if (callee == NULL)
                return false;

              fun->start = fun_start;
              callee->fun = fun;
              callee->is_tail = true;
              callee->is_pasted = true;
              callee->broken_cycle = false;
              callee->priority = 0;
              callee->count = 1;
              if (!insert_callee (fun_start, callee))
                free (callee);
              return true;
            }
          break;
        }
      if (l->type == bfd_indirect_link_order
          && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
          && (sinfo = sec_data->u.i.stack_info) != NULL
          && sinfo->num_fun != 0)
        fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Don't return an error if we did not find a function preceding this
     section.  The section may have incorrect flags.  */
  return true;
}

  2577. /* Map address ranges in code sections to functions. */
  2578. static bool
  2579. discover_functions (struct bfd_link_info *info)
  2580. {
  2581. bfd *ibfd;
  2582. int bfd_idx;
  2583. Elf_Internal_Sym ***psym_arr;
  2584. asection ***sec_arr;
  2585. bool gaps = false;
  2586. bfd_idx = 0;
  2587. for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
  2588. bfd_idx++;
  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return false;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return false;

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &spu_elf32_vec)
        continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
        {
          if (!gaps)
            for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
              if (interesting_section (sec))
                {
                  gaps = true;
                  break;
                }
          continue;
        }

      /* Don't use cached symbols since the generic ELF linker
         code only reads local symbols, and we need globals too.  */
      free (symtab_hdr->contents);
      symtab_hdr->contents = NULL;
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
                                   NULL, NULL, NULL);
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
        return false;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
        return false;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
        return false;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
        if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
            || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
          {
            asection *s;

            *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
            if (s != NULL && interesting_section (s))
              *psy++ = sy;
          }
      symcount = psy - psyms;
      *psy = NULL;
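      /* The extra NULL entry terminates the array for the "install
         all globals" pass further down.  */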
      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  */
      for (psy = psyms; psy < psyms + symcount; )
        {
          asection *s = psecs[*psy - syms];
          Elf_Internal_Sym **psy2;

          for (psy2 = psy; ++psy2 < psyms + symcount; )
            if (psecs[*psy2 - syms] != s)
              break;

          if (!alloc_stack_info (s, psy2 - psy))
            return false;
          psy = psy2;
        }

      /* First install info about properly typed and sized functions.
         In an ideal world this will cover all code sections, except
         when partitioning functions into hot and cold sections,
         and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
        {
          sy = *psy;
          if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
            {
              asection *s = psecs[sy - syms];
              if (!maybe_insert_function (s, sy, false, true))
                return false;
            }
        }

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
        if (interesting_section (sec))
          gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
         relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
           ibfd != NULL;
           ibfd = ibfd->link.next, bfd_idx++)
        {
          asection *sec;

          if (psym_arr[bfd_idx] == NULL)
            continue;

          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
            if (!mark_functions_via_relocs (sec, info, false))
              return false;
        }

      for (ibfd = info->input_bfds, bfd_idx = 0;
           ibfd != NULL;
           ibfd = ibfd->link.next, bfd_idx++)
        {
          Elf_Internal_Shdr *symtab_hdr;
          asection *sec;
          Elf_Internal_Sym *syms, *sy, **psyms, **psy;
          asection **psecs;

          if ((psyms = psym_arr[bfd_idx]) == NULL)
            continue;

          psecs = sec_arr[bfd_idx];
          symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
          syms = (Elf_Internal_Sym *) symtab_hdr->contents;

          gaps = false;
          for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
            if (interesting_section (sec))
              gaps |= check_function_ranges (sec, info);
          if (!gaps)
            continue;

          /* Finally, install all globals.  */
          for (psy = psyms; (sy = *psy) != NULL; ++psy)
            {
              asection *s;

              s = psecs[sy - syms];

              /* Global syms might be improperly typed functions.  */
              if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
                  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
                {
                  if (!maybe_insert_function (s, sy, false, false))
                    return false;
                }
            }
        }

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
        {
          extern const bfd_target spu_elf32_vec;
          asection *sec;

          if (ibfd->xvec != &spu_elf32_vec)
            continue;

          /* Some of the symbols we've installed as marking the
             beginning of functions may have a size of zero.  Extend
             the range of such functions to the beginning of the
             next symbol of interest.  */
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
            if (interesting_section (sec))
              {
                struct _spu_elf_section_data *sec_data;
                struct spu_elf_stack_info *sinfo;

                sec_data = spu_elf_section_data (sec);
                sinfo = sec_data->u.i.stack_info;
                if (sinfo != NULL && sinfo->num_fun != 0)
                  {
                    int fun_idx;
                    bfd_vma hi = sec->size;

                    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
                      {
                        sinfo->fun[fun_idx].hi = hi;
                        hi = sinfo->fun[fun_idx].lo;
                      }

                    sinfo->fun[0].lo = 0;
                  }
                /* No symbols in this section.  Must be .init or .fini
                   or something similar.  */
                else if (!pasted_function (sec))
                  return false;
              }
        }
    }

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
        continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return true;
}

/* Iterate over all function_info we have collected, calling DOIT on
   each node if ROOT_ONLY is false.  Only call DOIT on root nodes
   if ROOT_ONLY.  */

static bool
for_each_node (bool (*doit) (struct function_info *,
                             struct bfd_link_info *,
                             void *),
               struct bfd_link_info *info,
               void *param,
               int root_only)
{
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      asection *sec;

      if (ibfd->xvec != &spu_elf32_vec)
        continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
        {
          struct _spu_elf_section_data *sec_data;
          struct spu_elf_stack_info *sinfo;

          if ((sec_data = spu_elf_section_data (sec)) != NULL
              && (sinfo = sec_data->u.i.stack_info) != NULL)
            {
              int i;
              for (i = 0; i < sinfo->num_fun; ++i)
                if (!root_only || !sinfo->fun[i].non_root)
                  if (!doit (&sinfo->fun[i], info, param))
                    return false;
            }
        }
    }
  return true;
}

/* Transfer call info attached to struct function_info entries for
   all of a given function's sections to the first entry.  */

static bool
transfer_calls (struct function_info *fun,
                struct bfd_link_info *info ATTRIBUTE_UNUSED,
                void *param ATTRIBUTE_UNUSED)
{
  struct function_info *start = fun->start;

  if (start != NULL)
    {
      struct call_info *call, *call_next;

      while (start->start != NULL)
        start = start->start;
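      /* START now points at the function's first section, which
         carries the merged call list.  */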
      for (call = fun->call_list; call != NULL; call = call_next)
        {
          call_next = call->next;
          if (!insert_callee (start, call))
            free (call);
        }
      fun->call_list = NULL;
    }
  return true;
}

/* Mark nodes in the call graph that are called by some other node.  */

static bool
mark_non_root (struct function_info *fun,
               struct bfd_link_info *info ATTRIBUTE_UNUSED,
               void *param ATTRIBUTE_UNUSED)
{
  struct call_info *call;

  if (fun->visit1)
    return true;
  fun->visit1 = true;
  for (call = fun->call_list; call; call = call->next)
    {
      call->fun->non_root = true;
      mark_non_root (call->fun, 0, 0);
    }
  return true;
}

/* Remove cycles from the call graph.  Set depth of nodes.  */

static bool
remove_cycles (struct function_info *fun,
               struct bfd_link_info *info,
               void *param)
{
  struct call_info **callp, *call;
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  fun->visit2 = true;
  fun->marking = true;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      call->max_depth = depth + !call->is_pasted;
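      /* A pasted continuation is part of the same function, so it
         does not add a level of call depth.  */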
      if (!call->fun->visit2)
        {
          if (!remove_cycles (call->fun, info, &call->max_depth))
            return false;
          if (max_depth < call->max_depth)
            max_depth = call->max_depth;
        }
      else if (call->fun->marking)
        {
          struct spu_link_hash_table *htab = spu_hash_table (info);

          if (!htab->params->auto_overlay
              && htab->params->stack_analysis)
            {
              const char *f1 = func_name (fun);
              const char *f2 = func_name (call->fun);

              /* xgettext:c-format */
              info->callbacks->info (_("stack analysis will ignore the call "
                                       "from %s to %s\n"),
                                     f1, f2);
            }

          call->broken_cycle = true;
        }
      callp = &call->next;
    }
  fun->marking = false;
  *(unsigned int *) param = max_depth;
  return true;
}

/* Check that we actually visited all nodes in remove_cycles.  If we
   didn't, then there is some cycle in the call graph not attached to
   any root node.  Arbitrarily choose a node in the cycle as a new
   root and break the cycle.  */

static bool
mark_detached_root (struct function_info *fun,
                    struct bfd_link_info *info,
                    void *param)
{
  if (fun->visit2)
    return true;
  fun->non_root = false;
  *(unsigned int *) param = 0;
  return remove_cycles (fun, info, param);
}

/* Populate call_list for each function.  */

static bool
build_call_tree (struct bfd_link_info *info)
{
  bfd *ibfd;
  unsigned int depth;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      asection *sec;

      if (ibfd->xvec != &spu_elf32_vec)
        continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
        if (!mark_functions_via_relocs (sec, info, true))
          return false;
    }

  /* Transfer call info from hot/cold section part of function
     to main entry.  */
  if (!spu_hash_table (info)->params->auto_overlay
      && !for_each_node (transfer_calls, info, 0, false))
    return false;

  /* Find the call graph root(s).  */
  if (!for_each_node (mark_non_root, info, 0, false))
    return false;

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  depth = 0;
  if (!for_each_node (remove_cycles, info, &depth, true))
    return false;

  return for_each_node (mark_detached_root, info, &depth, false);
}

/* qsort predicate to sort calls by priority, max_depth then count.  */

static int
sort_calls (const void *a, const void *b)
{
  struct call_info *const *c1 = a;
  struct call_info *const *c2 = b;
  int delta;

  delta = (*c2)->priority - (*c1)->priority;
  if (delta != 0)
    return delta;

  delta = (*c2)->max_depth - (*c1)->max_depth;
  if (delta != 0)
    return delta;

  delta = (*c2)->count - (*c1)->count;
  if (delta != 0)
    return delta;
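  /* Fall back on the elements' addresses so that equal keys still
     sort deterministically.  */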
  return (char *) c1 - (char *) c2;
}

struct _mos_param {
  unsigned int max_overlay_size;
};

/* Set linker_mark and gc_mark on any sections that we will put in
   overlays.  These flags are used by the generic ELF linker, but we
   won't be continuing on to bfd_elf_final_link so it is OK to use
   them.  linker_mark is clear before we get here.  Set segment_mark
   on sections that are part of a pasted function (excluding the last
   section).

   Set up the function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections.

   Sort the call graph so that the deepest nodes will be visited
   first.  */
static bool
mark_overlay_section (struct function_info *fun,
                      struct bfd_link_info *info,
                      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (fun->visit4)
    return true;
  fun->visit4 = true;

  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
          || htab->params->non_ia_text
          || startswith (fun->sec->name, ".text.ia.")
          || strcmp (fun->sec->name, ".init") == 0
          || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
         be!), and SEC_CODE is clear on rodata sections.  We use
         this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
        {
          char *name = NULL;

          /* Find the rodata section corresponding to this function's
             text section.  */
          if (strcmp (fun->sec->name, ".text") == 0)
            {
              name = bfd_malloc (sizeof (".rodata"));
              if (name == NULL)
                return false;
              memcpy (name, ".rodata", sizeof (".rodata"));
            }
          else if (startswith (fun->sec->name, ".text."))
            {
              size_t len = strlen (fun->sec->name);
              name = bfd_malloc (len + 3);
              if (name == NULL)
                return false;
              memcpy (name, ".rodata", sizeof (".rodata"));
              memcpy (name + 7, fun->sec->name + 5, len - 4);
            }
          else if (startswith (fun->sec->name, ".gnu.linkonce.t."))
            {
              size_t len = strlen (fun->sec->name) + 1;
              name = bfd_malloc (len);
              if (name == NULL)
                return false;
              memcpy (name, fun->sec->name, len);
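              /* Rewrite ".gnu.linkonce.t." to ".gnu.linkonce.r.";
                 index 14 is the 't' in that prefix.  */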
              name[14] = 'r';
            }

          if (name != NULL)
            {
              asection *rodata = NULL;
              asection *group_sec = elf_section_data (fun->sec)->next_in_group;

              if (group_sec == NULL)
                rodata = bfd_get_section_by_name (fun->sec->owner, name);
              else
                while (group_sec != NULL && group_sec != fun->sec)
                  {
                    if (strcmp (group_sec->name, name) == 0)
                      {
                        rodata = group_sec;
                        break;
                      }
                    group_sec = elf_section_data (group_sec)->next_in_group;
                  }
              fun->rodata = rodata;
              if (fun->rodata)
                {
                  size += fun->rodata->size;
                  if (htab->params->line_size != 0
                      && size > htab->params->line_size)
                    {
                      size -= fun->rodata->size;
                      fun->rodata = NULL;
                    }
                  else
                    {
                      fun->rodata->linker_mark = 1;
                      fun->rodata->gc_mark = 1;
                      fun->rodata->flags &= ~SEC_CODE;
                    }
                }
              free (name);
            }
        }
      if (mos_param->max_overlay_size < size)
        mos_param->max_overlay_size = size;
    }

  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
        return false;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
        calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      fun->call_list = NULL;
      while (count != 0)
        {
          --count;
          calls[count]->next = fun->call_list;
          fun->call_list = calls[count];
        }
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
        {
          /* There can only be one is_pasted call per function_info.  */
          BFD_ASSERT (!fun->sec->segment_mark);
          fun->sec->segment_mark = 1;
        }
      if (!call->broken_cycle
          && !mark_overlay_section (call->fun, info, param))
        return false;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || startswith (fun->sec->output_section->name, ".ovl.init"))
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
        fun->rodata->linker_mark = 0;
    }
  return true;
}

/* If non-zero then unmark functions called from those within sections
   that we need to unmark.  Unfortunately this isn't reliable since the
   call graph cannot know the destination of function pointer calls.  */
#define RECURSE_UNMARK 0

struct _uos_param {
  asection *exclude_input_section;
  asection *exclude_output_section;
  unsigned long clearing;
};

/* Undo some of mark_overlay_section's work.  */

static bool
unmark_overlay_section (struct function_info *fun,
                        struct bfd_link_info *info,
                        void *param)
{
  struct call_info *call;
  struct _uos_param *uos_param = param;
  unsigned int excluded = 0;

  if (fun->visit5)
    return true;
  fun->visit5 = true;

  excluded = 0;
  if (fun->sec == uos_param->exclude_input_section
      || fun->sec->output_section == uos_param->exclude_output_section)
    excluded = 1;

  if (RECURSE_UNMARK)
    uos_param->clearing += excluded;

  if (RECURSE_UNMARK ? uos_param->clearing : excluded)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata)
        fun->rodata->linker_mark = 0;
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
        && !unmark_overlay_section (call->fun, info, param))
      return false;

  if (RECURSE_UNMARK)
    uos_param->clearing -= excluded;
  return true;
}

struct _cl_param {
  unsigned int lib_size;
  asection **lib_sections;
};

/* Add sections we have marked as belonging to overlays to an array
   for consideration as non-overlay sections.  The array consists of
   pairs of sections, (text,rodata), for functions in the call graph.  */

static bool
collect_lib_sections (struct function_info *fun,
                      struct bfd_link_info *info,
                      void *param)
{
  struct _cl_param *lib_param = param;
  struct call_info *call;
  unsigned int size;

  if (fun->visit6)
    return true;
  fun->visit6 = true;

  if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
    return true;

  size = fun->sec->size;
  if (fun->rodata)
    size += fun->rodata->size;

  if (size <= lib_param->lib_size)
    {
      *lib_param->lib_sections++ = fun->sec;
      fun->sec->gc_mark = 0;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
        {
          *lib_param->lib_sections++ = fun->rodata;
          fun->rodata->gc_mark = 0;
        }
      else
        *lib_param->lib_sections++ = NULL;
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle)
      collect_lib_sections (call->fun, info, param);

  return true;
}

/* qsort predicate to sort sections by call count.  */

static int
sort_lib (const void *a, const void *b)
{
  asection *const *s1 = a;
  asection *const *s2 = b;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  int delta;

  delta = 0;
  if ((sec_data = spu_elf_section_data (*s1)) != NULL
      && (sinfo = sec_data->u.i.stack_info) != NULL)
    {
      int i;
      for (i = 0; i < sinfo->num_fun; ++i)
        delta -= sinfo->fun[i].call_count;
    }

  if ((sec_data = spu_elf_section_data (*s2)) != NULL
      && (sinfo = sec_data->u.i.stack_info) != NULL)
    {
      int i;
      for (i = 0; i < sinfo->num_fun; ++i)
        delta += sinfo->fun[i].call_count;
    }

  if (delta != 0)
    return delta;

  return s1 - s2;
}

/* Remove some sections from those marked to be in overlays.  Choose
   those that are called from many places, likely library functions.  */

static unsigned int
auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
{
  bfd *ibfd;
  asection **lib_sections;
  unsigned int i, lib_count;
  struct _cl_param collect_lib_param;
  struct function_info dummy_caller;
  struct spu_link_hash_table *htab;

  memset (&dummy_caller, 0, sizeof (dummy_caller));
  lib_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      asection *sec;

      if (ibfd->xvec != &spu_elf32_vec)
        continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
        if (sec->linker_mark
            && sec->size < lib_size
            && (sec->flags & SEC_CODE) != 0)
          lib_count += 1;
    }
  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
  if (lib_sections == NULL)
    return (unsigned int) -1;
  collect_lib_param.lib_size = lib_size;
  collect_lib_param.lib_sections = lib_sections;
  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
                      true))
    return (unsigned int) -1;
  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;

  /* Sort sections so that those with the most calls are first.  */
  if (lib_count > 1)
    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);

  htab = spu_hash_table (info);
  for (i = 0; i < lib_count; i++)
    {
      unsigned int tmp, stub_size;
      asection *sec;
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      sec = lib_sections[2 * i];
      /* If this section is OK, its size must be less than lib_size.  */
      tmp = sec->size;
      /* If it has a rodata section, then add that too.  */
      if (lib_sections[2 * i + 1])
        tmp += lib_sections[2 * i + 1]->size;
      /* Add any new overlay call stubs needed by the section.  */
      stub_size = 0;
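      /* A stub is needed for each callee still marked for an overlay,
         unless a previously accepted non-overlay section already
         required the same stub (those are tracked in dummy_caller).  */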
      if (tmp < lib_size
          && (sec_data = spu_elf_section_data (sec)) != NULL
          && (sinfo = sec_data->u.i.stack_info) != NULL)
        {
          int k;
          struct call_info *call;

          for (k = 0; k < sinfo->num_fun; ++k)
            for (call = sinfo->fun[k].call_list; call; call = call->next)
              if (call->fun->sec->linker_mark)
                {
                  struct call_info *p;
                  for (p = dummy_caller.call_list; p; p = p->next)
                    if (p->fun == call->fun)
                      break;
                  if (!p)
                    stub_size += ovl_stub_size (htab->params);
                }
        }
      if (tmp + stub_size < lib_size)
        {
          struct call_info **pp, *p;

          /* This section fits.  Mark it as non-overlay.  */
          lib_sections[2 * i]->linker_mark = 0;
          if (lib_sections[2 * i + 1])
            lib_sections[2 * i + 1]->linker_mark = 0;
          lib_size -= tmp + stub_size;
          /* Call stubs to the section we just added are no longer
             needed.  */
          pp = &dummy_caller.call_list;
          while ((p = *pp) != NULL)
            if (!p->fun->sec->linker_mark)
              {
                lib_size += ovl_stub_size (htab->params);
                *pp = p->next;
                free (p);
              }
            else
              pp = &p->next;
          /* Add new call stubs to dummy_caller.  */
          if ((sec_data = spu_elf_section_data (sec)) != NULL
              && (sinfo = sec_data->u.i.stack_info) != NULL)
            {
              int k;
              struct call_info *call;

              for (k = 0; k < sinfo->num_fun; ++k)
                for (call = sinfo->fun[k].call_list;
                     call;
                     call = call->next)
                  if (call->fun->sec->linker_mark)
                    {
                      struct call_info *callee;
                      callee = bfd_malloc (sizeof (*callee));
                      if (callee == NULL)
                        return (unsigned int) -1;
                      *callee = *call;
                      if (!insert_callee (&dummy_caller, callee))
                        free (callee);
                    }
            }
        }
    }
  while (dummy_caller.call_list != NULL)
    {
      struct call_info *call = dummy_caller.call_list;
      dummy_caller.call_list = call->next;
      free (call);
    }
  for (i = 0; i < 2 * lib_count; i++)
    if (lib_sections[i])
      lib_sections[i]->gc_mark = 1;
  free (lib_sections);
  return lib_size;
}

/* Build an array of overlay sections.  The deepest node's section is
   added first, then its parent node's section, then everything called
   from the parent section.  The idea being to group sections to
   minimise calls between different overlays.  */

static bool
collect_overlays (struct function_info *fun,
                  struct bfd_link_info *info,
                  void *param)
{
  struct call_info *call;
  bool added_fun;
  asection ***ovly_sections = param;

  if (fun->visit7)
    return true;
  fun->visit7 = true;

  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
        if (!collect_overlays (call->fun, info, ovly_sections))
          return false;
        break;
      }
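  /* Recursing into the first real callee before adding FUN itself
     places the deepest chain's sections ahead of their callers in
     the array.  */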
  added_fun = false;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
        {
          fun->rodata->gc_mark = 0;
          *(*ovly_sections)++ = fun->rodata;
        }
      else
        *(*ovly_sections)++ = NULL;
      added_fun = true;

      /* Pasted sections must stay with the first section.  We don't
         put pasted sections in the array, just the first section.
         Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
        {
          struct function_info *call_fun = fun;
          do
            {
              for (call = call_fun->call_list; call != NULL; call = call->next)
                if (call->is_pasted)
                  {
                    call_fun = call->fun;
                    call_fun->sec->gc_mark = 0;
                    if (call_fun->rodata)
                      call_fun->rodata->gc_mark = 0;
                    break;
                  }
              if (call == NULL)
                abort ();
            }
          while (call_fun->sec->segment_mark);
        }
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
        && !collect_overlays (call->fun, info, ovly_sections))
      return false;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
          && (sinfo = sec_data->u.i.stack_info) != NULL)
        {
          int i;
          for (i = 0; i < sinfo->num_fun; ++i)
            if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
              return false;
        }
    }

  return true;
}

struct _sum_stack_param {
  size_t cum_stack;
  size_t overall_stack;
  bool emit_stack_syms;
};

/* Descend the call graph for FUN, accumulating total stack required.  */

static bool
sum_stack (struct function_info *fun,
           struct bfd_link_info *info,
           void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bool has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  if (fun->visit3)
    return true;

  has_call = false;
  max = NULL;
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
        continue;
      if (!call->is_pasted)
        has_call = true;
      if (!sum_stack (call->fun, info, sum_stack_param))
        return false;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
         tail calls.  fun->stack here is local stack usage for
         this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
        stack += fun->stack;
      if (cum_stack < stack)
        {
          cum_stack = stack;
          max = call->fun;
        }
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = true;

  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  if (htab->params->auto_overlay)
    return true;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
        info->callbacks->info (" %s: 0x%v\n", f1, (bfd_vma) cum_stack);
      info->callbacks->minfo ("%s: 0x%v 0x%v\n",
                              f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
        {
          info->callbacks->minfo (_(" calls:\n"));
          for (call = fun->call_list; call; call = call->next)
            if (!call->is_pasted && !call->broken_cycle)
              {
                const char *f2 = func_name (call->fun);
                const char *ann1 = call->fun == max ? "*" : " ";
                const char *ann2 = call->is_tail ? "t" : " ";

                info->callbacks->minfo (" %s%s %s\n", ann1, ann2, f2);
              }
        }
    }

  if (sum_stack_param->emit_stack_syms)
    {
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
        return false;

      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
        sprintf (name, "__stack_%s", f1);
      else
        sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
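      /* Local functions get the section id folded into the symbol
         name so that statics with the same name do not collide.  */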
      h = elf_link_hash_lookup (&htab->elf, name, true, true, false);
      free (name);
      if (h != NULL
          && (h->root.type == bfd_link_hash_new
              || h->root.type == bfd_link_hash_undefined
              || h->root.type == bfd_link_hash_undefweak))
        {
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = bfd_abs_section_ptr;
          h->root.u.def.value = cum_stack;
          h->size = 0;
          h->type = 0;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
          h->non_elf = 0;
        }
    }

  return true;
}

/* SEC is part of a pasted function.  Return the call_info for the
   next section of this function.  */

static struct call_info *
find_pasted_call (asection *sec)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  struct call_info *call;
  int k;

  for (k = 0; k < sinfo->num_fun; ++k)
    for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
      if (call->is_pasted)
        return call;
  abort ();
  return 0;
}

/* qsort predicate to sort bfds by file name.  */

static int
sort_bfds (const void *a, const void *b)
{
  bfd *const *abfd1 = a;
  bfd *const *abfd2 = b;

  return filename_cmp (bfd_get_filename (*abfd1), bfd_get_filename (*abfd2));
}

static unsigned int
print_one_overlay_section (FILE *script,
                           unsigned int base,
                           unsigned int count,
                           unsigned int ovlynum,
                           unsigned int *ovly_map,
                           asection **ovly_sections,
                           struct bfd_link_info *info)
{
  unsigned int j;

  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
    {
      asection *sec = ovly_sections[2 * j];

      if (fprintf (script, " %s%c%s (%s)\n",
                   (sec->owner->my_archive != NULL
                    ? bfd_get_filename (sec->owner->my_archive) : ""),
                   info->path_separator,
                   bfd_get_filename (sec->owner),
                   sec->name) <= 0)
        return -1;
      if (sec->segment_mark)
        {
          struct call_info *call = find_pasted_call (sec);
          while (call != NULL)
            {
              struct function_info *call_fun = call->fun;
              sec = call_fun->sec;
              if (fprintf (script, " %s%c%s (%s)\n",
                           (sec->owner->my_archive != NULL
                            ? bfd_get_filename (sec->owner->my_archive) : ""),
                           info->path_separator,
                           bfd_get_filename (sec->owner),
                           sec->name) <= 0)
                return -1;
              for (call = call_fun->call_list; call; call = call->next)
                if (call->is_pasted)
                  break;
            }
        }
    }
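  /* Second pass: emit the matching rodata sections in the same
     order.  */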
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
    {
      asection *sec = ovly_sections[2 * j + 1];

      if (sec != NULL
          && fprintf (script, " %s%c%s (%s)\n",
                      (sec->owner->my_archive != NULL
                       ? bfd_get_filename (sec->owner->my_archive) : ""),
                      info->path_separator,
                      bfd_get_filename (sec->owner),
                      sec->name) <= 0)
        return -1;

      sec = ovly_sections[2 * j];
      if (sec->segment_mark)
        {
          struct call_info *call = find_pasted_call (sec);
          while (call != NULL)
            {
              struct function_info *call_fun = call->fun;
              sec = call_fun->rodata;
              if (sec != NULL
                  && fprintf (script, " %s%c%s (%s)\n",
                              (sec->owner->my_archive != NULL
                               ? bfd_get_filename (sec->owner->my_archive) : ""),
                              info->path_separator,
                              bfd_get_filename (sec->owner),
                              sec->name) <= 0)
                return -1;
              for (call = call_fun->call_list; call; call = call->next)
                if (call->is_pasted)
                  break;
            }
        }
    }

  return j;
}

/* Handle --auto-overlay.  */

static void
spu_elf_auto_overlay (struct bfd_link_info *info)
{
  bfd *ibfd;
  bfd **bfd_arr;
  struct elf_segment_map *m;
  unsigned int fixed_size, lo, hi;
  unsigned int reserved;
  struct spu_link_hash_table *htab;
  unsigned int base, i, count, bfd_count;
  unsigned int region, ovlynum;
  asection **ovly_sections, **ovly_p;
  unsigned int *ovly_map;
  FILE *script;
  unsigned int total_overlay_size, overlay_size;
  const char *ovly_mgr_entry;
  struct elf_link_hash_entry *h;
  struct _mos_param mos_param;
  struct _uos_param uos_param;
  struct function_info dummy_caller;

  /* Find the extents of our loadable image.  */
  lo = (unsigned int) -1;
  hi = 0;
  for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
        if (m->sections[i]->size != 0)
          {
            if (m->sections[i]->vma < lo)
              lo = m->sections[i]->vma;
            if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
              hi = m->sections[i]->vma + m->sections[i]->size - 1;
          }
  fixed_size = hi + 1 - lo;
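  /* FIXED_SIZE starts out covering the whole loadable image; the
     sizes of sections chosen for overlays are subtracted below.  */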
  if (!discover_functions (info))
    goto err_exit;

  if (!build_call_tree (info))
    goto err_exit;

  htab = spu_hash_table (info);
  reserved = htab->params->auto_overlay_reserved;
  if (reserved == 0)
    {
      struct _sum_stack_param sum_stack_param;

      sum_stack_param.emit_stack_syms = 0;
      sum_stack_param.overall_stack = 0;
      if (!for_each_node (sum_stack, info, &sum_stack_param, true))
        goto err_exit;
      reserved = (sum_stack_param.overall_stack
                  + htab->params->extra_stack_space);
    }

  /* No need for overlays if everything already fits.  */
  if (fixed_size + reserved <= htab->local_store
      && htab->params->ovly_flavour != ovly_soft_icache)
    {
      htab->params->auto_overlay = 0;
      return;
    }

  uos_param.exclude_input_section = 0;
  uos_param.exclude_output_section
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");

  ovly_mgr_entry = "__ovly_load";
  if (htab->params->ovly_flavour == ovly_soft_icache)
    ovly_mgr_entry = "__icache_br_handler";
  h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
                            false, false, false);
  if (h != NULL
      && (h->root.type == bfd_link_hash_defined
          || h->root.type == bfd_link_hash_defweak)
      && h->def_regular)
    {
      /* We have a user supplied overlay manager.  */
      uos_param.exclude_input_section = h->root.u.def.section;
    }
  else
    {
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
         builtin version to .text, and will adjust .text size.  */
      fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
    }

  /* Mark overlay sections, and find max overlay section size.  */
  mos_param.max_overlay_size = 0;
  if (!for_each_node (mark_overlay_section, info, &mos_param, true))
    goto err_exit;

  /* We can't put the overlay manager or interrupt routines in
     overlays.  */
  uos_param.clearing = 0;
  if ((uos_param.exclude_input_section
       || uos_param.exclude_output_section)
      && !for_each_node (unmark_overlay_section, info, &uos_param, true))
    goto err_exit;

  bfd_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    ++bfd_count;
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
  if (bfd_arr == NULL)
    goto err_exit;

  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
  count = 0;
  bfd_count = 0;
  total_overlay_size = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      asection *sec;
      unsigned int old_count;

      if (ibfd->xvec != &spu_elf32_vec)
        continue;

      old_count = count;
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
        if (sec->linker_mark)
          {
            if ((sec->flags & SEC_CODE) != 0)
              count += 1;
            fixed_size -= sec->size;
            total_overlay_size += sec->size;
          }
        else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
                 && sec->output_section->owner == info->output_bfd
                 && startswith (sec->output_section->name, ".ovl.init"))
          fixed_size -= sec->size;
      if (count != old_count)
        bfd_arr[bfd_count++] = ibfd;
    }

  /* Since the overlay link script selects sections by file name and
     section name, ensure that file names are unique.  */
  if (bfd_count > 1)
    {
      bool ok = true;

      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
      for (i = 1; i < bfd_count; ++i)
        if (filename_cmp (bfd_get_filename (bfd_arr[i - 1]),
                          bfd_get_filename (bfd_arr[i])) == 0)
          {
            if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
              {
                if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
                  /* xgettext:c-format */
                  info->callbacks->einfo (_("%s duplicated in %s\n"),
                                          bfd_get_filename (bfd_arr[i]),
                                          bfd_get_filename (bfd_arr[i]->my_archive));
                else
                  info->callbacks->einfo (_("%s duplicated\n"),
                                          bfd_get_filename (bfd_arr[i]));
                ok = false;
              }
          }
      if (!ok)
        {
          info->callbacks->einfo (_("sorry, no support for duplicate "
                                    "object files in auto-overlay script\n"));
          bfd_set_error (bfd_error_bad_value);
          goto err_exit;
        }
    }
  free (bfd_arr);

  fixed_size += reserved;
  fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
    {
      if (htab->params->ovly_flavour == ovly_soft_icache)
        {
          /* Stubs in the non-icache area are bigger.  */
          fixed_size += htab->non_ovly_stub * 16;
          /* Space for icache manager tables.
             a) Tag array, one quadword per cache line.
             - word 0: ia address of present line, init to zero.  */
          fixed_size += 16 << htab->num_lines_log2;
          /* b) Rewrite "to" list, one quadword per cache line.  */
          fixed_size += 16 << htab->num_lines_log2;
          /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
             to a power-of-two number of full quadwords) per cache line.  */
          fixed_size += 16 << (htab->fromelem_size_log2
                               + htab->num_lines_log2);
          /* d) Pointer to __ea backing store (toe), 1 quadword.  */
          fixed_size += 16;
        }
      else
        {
          /* Guess number of overlays.  Assuming overlay buffer is on
             average only half full should be conservative.  */
          ovlynum = (total_overlay_size * 2 * htab->params->num_lines
                     / (htab->local_store - fixed_size));
          /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
          fixed_size += ovlynum * 16 + 16 + 4 + 16;
        }
    }

  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
    /* xgettext:c-format */
    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
                              "size of 0x%v exceeds local store\n"),
                            (bfd_vma) fixed_size,
                            (bfd_vma) mos_param.max_overlay_size);

  /* Now see if we should put some functions in the non-overlay area.  */
  else if (fixed_size < htab->params->auto_overlay_fixed)
    {
      unsigned int max_fixed, lib_size;

      max_fixed = htab->local_store - mos_param.max_overlay_size;
      if (max_fixed > htab->params->auto_overlay_fixed)
        max_fixed = htab->params->auto_overlay_fixed;
      lib_size = max_fixed - fixed_size;
      lib_size = auto_ovl_lib_functions (info, lib_size);
      if (lib_size == (unsigned int) -1)
        goto err_exit;
      fixed_size = max_fixed - lib_size;
    }

  /* Build an array of sections, suitably sorted to place into
     overlays.  */
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
  if (ovly_sections == NULL)
    goto err_exit;
  ovly_p = ovly_sections;
  if (!for_each_node (collect_overlays, info, &ovly_p, true))
    goto err_exit;
  count = (size_t) (ovly_p - ovly_sections) / 2;
  ovly_map = bfd_malloc (count * sizeof (*ovly_map));
  if (ovly_map == NULL)
    goto err_exit;

  memset (&dummy_caller, 0, sizeof (dummy_caller));
  overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
  if (htab->params->line_size != 0)
    overlay_size = htab->params->line_size;
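  /* A non-zero line size means the soft icache is in use; each
     overlay must then fit a single cache line.  */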
  base = 0;
  ovlynum = 0;
  while (base < count)
    {
      unsigned int size = 0, rosize = 0, roalign = 0;

      for (i = base; i < count; i++)
        {
          asection *sec, *rosec;
          unsigned int tmp, rotmp;
          unsigned int num_stubs;
          struct call_info *call, *pasty;
          struct _spu_elf_section_data *sec_data;
          struct spu_elf_stack_info *sinfo;
          unsigned int k;

          /* See whether we can add this section to the current
             overlay without overflowing our overlay buffer.  */
          sec = ovly_sections[2 * i];
          tmp = align_power (size, sec->alignment_power) + sec->size;
          rotmp = rosize;
          rosec = ovly_sections[2 * i + 1];
          if (rosec != NULL)
            {
              rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
              if (roalign < rosec->alignment_power)
                roalign = rosec->alignment_power;
            }
          if (align_power (tmp, roalign) + rotmp > overlay_size)
            break;
          if (sec->segment_mark)
            {
              /* Pasted sections must stay together, so add their
                 sizes too.  */
              pasty = find_pasted_call (sec);
              while (pasty != NULL)
                {
                  struct function_info *call_fun = pasty->fun;

                  tmp = (align_power (tmp, call_fun->sec->alignment_power)
                         + call_fun->sec->size);
                  if (call_fun->rodata)
                    {
                      rotmp = (align_power (rotmp,
                                            call_fun->rodata->alignment_power)
                               + call_fun->rodata->size);
                      if (roalign < rosec->alignment_power)
                        roalign = rosec->alignment_power;
                    }
                  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
                    if (pasty->is_pasted)
                      break;
                }
            }
          if (align_power (tmp, roalign) + rotmp > overlay_size)
            break;

          /* If we add this section, we might need new overlay call
             stubs.  Add any overlay section calls to dummy_caller.  */
          pasty = NULL;
          sec_data = spu_elf_section_data (sec);
          sinfo = sec_data->u.i.stack_info;
          for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
            for (call = sinfo->fun[k].call_list; call; call = call->next)
              if (call->is_pasted)
                {
                  BFD_ASSERT (pasty == NULL);
                  pasty = call;
                }
              else if (call->fun->sec->linker_mark)
                {
                  if (!copy_callee (&dummy_caller, call))
                    goto err_exit;
                }
          while (pasty != NULL)
            {
              struct function_info *call_fun = pasty->fun;

              pasty = NULL;
              for (call = call_fun->call_list; call; call = call->next)
                if (call->is_pasted)
                  {
                    BFD_ASSERT (pasty == NULL);
                    pasty = call;
                  }
                else if (!copy_callee (&dummy_caller, call))
                  goto err_exit;
            }

          /* Calculate call stub size.  */
          num_stubs = 0;
          for (call = dummy_caller.call_list; call; call = call->next)
            {
              unsigned int stub_delta = 1;

              if (htab->params->ovly_flavour == ovly_soft_icache)
                stub_delta = call->count;
              num_stubs += stub_delta;

              /* If the call is within this overlay, we won't need a
                 stub.  */
              for (k = base; k < i + 1; k++)
                if (call->fun->sec == ovly_sections[2 * k])
                  {
                    num_stubs -= stub_delta;
                    break;
                  }
            }
          if (htab->params->ovly_flavour == ovly_soft_icache
              && num_stubs > htab->params->max_branch)
            break;
          if (align_power (tmp, roalign) + rotmp
              + num_stubs * ovl_stub_size (htab->params) > overlay_size)
            break;
          size = tmp;
          rosize = rotmp;
        }

      if (i == base)
        {
          /* xgettext:c-format */
          info->callbacks->einfo (_("%pB:%pA%s exceeds overlay size\n"),
                                  ovly_sections[2 * i]->owner,
                                  ovly_sections[2 * i],
                                  ovly_sections[2 * i + 1] ? " + rodata" : "");
          bfd_set_error (bfd_error_bad_value);
          goto err_exit;
        }

      while (dummy_caller.call_list != NULL)
        {
          struct call_info *call = dummy_caller.call_list;
          dummy_caller.call_list = call->next;
          free (call);
        }

      ++ovlynum;
      while (base < i)
        ovly_map[base++] = ovlynum;
    }

  script = htab->params->spu_elf_open_overlay_script ();

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
        goto file_err;

      if (fprintf (script,
                   " . = ALIGN (%u);\n"
                   " .ovl.init : { *(.ovl.init) }\n"
                   " . = ABSOLUTE (ADDR (.ovl.init));\n",
                   htab->params->line_size) <= 0)
        goto file_err;

      base = 0;
      ovlynum = 1;
      while (base < count)
        {
          unsigned int indx = ovlynum - 1;
          unsigned int vma, lma;

          vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
          lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
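          /* The VMA cycles through the cache lines, while the LMA
             places each successive group of lines in its own 256k
             (1 << 18) block of load address space after .ovl.init.  */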
          if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
                       ": AT (LOADADDR (.ovl.init) + %u) {\n",
                       ovlynum, vma, lma) <= 0)
            goto file_err;

          base = print_one_overlay_section (script, base, count, ovlynum,
                                            ovly_map, ovly_sections, info);
          if (base == (unsigned) -1)
            goto file_err;

          if (fprintf (script, " }\n") <= 0)
            goto file_err;

          ovlynum++;
        }

      if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
                   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
        goto file_err;

      if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
        goto file_err;
    }
  else
    {
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
        goto file_err;

      if (fprintf (script,
                   " . = ALIGN (16);\n"
                   " .ovl.init : { *(.ovl.init) }\n"
                   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
        goto file_err;

      for (region = 1; region <= htab->params->num_lines; region++)
        {
          ovlynum = region;
          base = 0;
          while (base < count && ovly_map[base] < ovlynum)
            base++;

          if (base == count)
            break;

          if (region == 1)
            {
              /* We need to set lma since we are overlaying .ovl.init.  */
              if (fprintf (script,
                           " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
                goto file_err;
            }
          else
            {
              if (fprintf (script, " OVERLAY :\n {\n") <= 0)
                goto file_err;
            }

          while (base < count)
            {
              if (fprintf (script, "  .ovly%u {\n", ovlynum) <= 0)
                goto file_err;

              base = print_one_overlay_section (script, base, count, ovlynum,
                                                ovly_map, ovly_sections, info);
              if (base == (unsigned) -1)
                goto file_err;

              if (fprintf (script, "  }\n") <= 0)
                goto file_err;

              ovlynum += htab->params->num_lines;
              while (base < count && ovly_map[base] < ovlynum)
                base++;
            }

          if (fprintf (script, " }\n") <= 0)
            goto file_err;
        }

      if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
        goto file_err;
    }

  free (ovly_map);
  free (ovly_sections);

  if (fclose (script) != 0)
    goto file_err;

  if (htab->params->auto_overlay & AUTO_RELINK)
    (*htab->params->spu_elf_relink) ();

  xexit (0);

 file_err:
  bfd_set_error (bfd_error_system_call);

 err_exit:
  info->callbacks->einfo (_("%F%P: auto overlay error: %E\n"));
  xexit (1);
}

/* Provide an estimate of total stack required.  */

static bool
spu_elf_stack_analysis (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  struct _sum_stack_param sum_stack_param;

  if (!discover_functions (info))
    return false;

  if (!build_call_tree (info))
    return false;

  htab = spu_hash_table (info);
  if (htab->params->stack_analysis)
    {
      info->callbacks->info (_("Stack size for call graph root nodes.\n"));
      info->callbacks->minfo (_("\nStack size for functions.  "
                                "Annotations: '*' max stack, 't' tail call\n"));
    }

  sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
  sum_stack_param.overall_stack = 0;
  if (!for_each_node (sum_stack, info, &sum_stack_param, true))
    return false;

  if (htab->params->stack_analysis)
    info->callbacks->info (_("Maximum stack required is 0x%v\n"),
                           (bfd_vma) sum_stack_param.overall_stack);
  return true;
}

/* Perform a final link.  */

static bool
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (htab->params->auto_overlay)
    spu_elf_auto_overlay (info);

  if ((htab->params->stack_analysis
       || (htab->params->ovly_flavour == ovly_soft_icache
           && htab->params->lrlive_analysis))
      && !spu_elf_stack_analysis (info))
    info->callbacks->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));

  if (!spu_elf_build_stubs (info))
    info->callbacks->einfo (_("%F%P: can not build overlay stubs: %E\n"));

  return bfd_elf_final_link (output_bfd, info);
}

/* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
   and !info->emitrelocations.  Returns a count of special relocs
   that need to be emitted.  */

static unsigned int
spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
{
  Elf_Internal_Rela *relocs;
  unsigned int count = 0;

  relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
                                      info->keep_memory);
  if (relocs != NULL)
    {
      Elf_Internal_Rela *rel;
      Elf_Internal_Rela *relend = relocs + sec->reloc_count;

      for (rel = relocs; rel < relend; rel++)
        {
          int r_type = ELF32_R_TYPE (rel->r_info);
          if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
            ++count;
        }

      if (elf_section_data (sec)->relocs != relocs)
        free (relocs);
    }

  return count;
}

/* Functions for adding fixup records to .fixup.  */

#define FIXUP_RECORD_SIZE 4

#define FIXUP_PUT(output_bfd,htab,index,addr) \
  bfd_put_32 (output_bfd, addr, \
              htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
#define FIXUP_GET(output_bfd,htab,index) \
  bfd_get_32 (output_bfd, \
              htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))

/* Store OFFSET in .fixup.  This assumes it will be called with an
   increasing OFFSET.  When this OFFSET fits with the last base offset,
   it just sets a bit, otherwise it adds a new fixup record.  */

static void
spu_elf_emit_fixup (bfd *output_bfd, struct bfd_link_info *info,
                    bfd_vma offset)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sfixup = htab->sfixup;
  bfd_vma qaddr = offset & ~(bfd_vma) 15;
  bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
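  /* Each fixup record covers one 16-byte quadword: the upper bits
     hold the quadword address and the low four bits form a mask,
     one bit per 32-bit word needing a fixup.  */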
  4168. if (sfixup->reloc_count == 0)
  4169. {
  4170. FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
  4171. sfixup->reloc_count++;
  4172. }
  4173. else
  4174. {
  4175. bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
  4176. if (qaddr != (base & ~(bfd_vma) 15))
  4177. {
  4178. if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
  4179. _bfd_error_handler (_("fatal error while creating .fixup"));
  4180. FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
  4181. sfixup->reloc_count++;
  4182. }
  4183. else
  4184. FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
  4185. }
  4186. }
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */

static int
spu_elf_relocate_section (bfd *output_bfd,
                          struct bfd_link_info *info,
                          bfd *input_bfd,
                          asection *input_section,
                          bfd_byte *contents,
                          Elf_Internal_Rela *relocs,
                          Elf_Internal_Sym *local_syms,
                          asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  int ret = true;
  bool emit_these_relocs = false;
  bool is_ea_sym;
  bool stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  stubs = (htab->stub_sec != NULL
           && maybe_needs_stubs (input_section));
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bool unresolved_reloc;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = false;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
        {
          sym = local_syms + r_symndx;
          sec = local_sections[r_symndx];
          sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
          relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
        }
      else
        {
          if (sym_hashes == NULL)
            return false;

          h = sym_hashes[r_symndx - symtab_hdr->sh_info];

          if (info->wrap_hash != NULL
              && (input_section->flags & SEC_DEBUGGING) != 0)
            h = ((struct elf_link_hash_entry *)
                 unwrap_hash_lookup (info, input_bfd, &h->root));

          while (h->root.type == bfd_link_hash_indirect
                 || h->root.type == bfd_link_hash_warning)
            h = (struct elf_link_hash_entry *) h->root.u.i.link;

          relocation = 0;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            {
              sec = h->root.u.def.section;
              if (sec == NULL
                  || sec->output_section == NULL)
                /* Set a flag that will be cleared later if we find a
                   relocation value for this symbol.  output_section
                   is typically NULL for symbols satisfied by a shared
                   library.  */
                unresolved_reloc = true;
              else
                relocation = (h->root.u.def.value
                              + sec->output_section->vma
                              + sec->output_offset);
            }
          else if (h->root.type == bfd_link_hash_undefweak)
            ;
          else if (info->unresolved_syms_in_objects == RM_IGNORE
                   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
            ;
          else if (!bfd_link_relocatable (info)
                   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
            {
              bool err;

              err = (info->unresolved_syms_in_objects == RM_DIAGNOSE
                     && !info->warn_unresolved_syms)
                || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT;
              info->callbacks->undefined_symbol
                (info, h->root.root.string, input_bfd,
                 input_section, rel->r_offset, err);
            }
          sym_name = h->root.root.string;
        }

      if (sec != NULL && discarded_section (sec))
        RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
                                         rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
        continue;

      /* Change "a rt,ra,rb" to "ai rt,ra,0".  */
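      /* "a" is an RR-form instruction (first encoding byte 0x18), while
         "ai" is RI10 form: an 8-bit opcode of 0x1c, a 10-bit immediate
         in bits 8-17, then the RA and RT fields.  Setting byte 0 to
         0x1c, clearing byte 1 and masking off the top two bits of
         byte 2 rewrites the opcode and zeroes the immediate while
         leaving RA and RT untouched.  */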
      if (r_type == R_SPU_ADD_PIC
          && h != NULL
          && !(h->def_regular || ELF_COMMON_DEF_P (h)))
        {
          bfd_byte *loc = contents + rel->r_offset;

          loc[0] = 0x1c;
          loc[1] = 0x00;
          loc[2] &= 0x3f;
        }
      is_ea_sym = (ea != NULL
                   && sec != NULL
                   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
         to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
          && !is_ea_sym
          && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
                                          contents, info)) != no_stub)
        {
          unsigned int ovl = 0;
          struct got_entry *g, **head;

          if (stub_type != nonovl_stub)
            ovl = iovl;

          if (h != NULL)
            head = &h->got.glist;
          else
            head = elf_local_got_ents (input_bfd) + r_symndx;

          for (g = *head; g != NULL; g = g->next)
            if (htab->params->ovly_flavour == ovly_soft_icache
                ? (g->ovl == ovl
                   && g->br_addr == (rel->r_offset
                                     + input_section->output_offset
                                     + input_section->output_section->vma))
                : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
              break;
          if (g == NULL)
            abort ();

          relocation = g->stub_addr;
          addend = 0;
        }
      else
        {
          /* For soft icache, encode the overlay index into addresses.  */
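          /* The encoded set id lands in address bits 18 and up, above
             the 256k local store, so the soft-icache runtime can read
             which cache set (a group of 1 << num_lines_log2 overlays)
             a target belongs to straight out of the address.  */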
          if (htab->params->ovly_flavour == ovly_soft_icache
              && (r_type == R_SPU_ADDR16_HI
                  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
              && !is_ea_sym)
            {
              unsigned int ovl = overlay_index (sec);

              if (ovl != 0)
                {
                  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
                  relocation += set_id << 18;
                }
            }
        }
      if (htab->params->emit_fixups && !bfd_link_relocatable (info)
          && (input_section->flags & SEC_ALLOC) != 0
          && r_type == R_SPU_ADDR32)
        {
          bfd_vma offset;

          offset = rel->r_offset + input_section->output_section->vma
            + input_section->output_offset;
          spu_elf_emit_fixup (output_bfd, info, offset);
        }

      if (unresolved_reloc)
        ;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
        {
          if (is_ea_sym)
            {
              /* ._ea is a special section that isn't allocated in SPU
                 memory, but rather occupies space in PPU memory as
                 part of an embedded ELF image.  If this reloc is
                 against a symbol defined in ._ea, then transform the
                 reloc into an equivalent one without a symbol
                 relative to the start of the ELF image.  */
              rel->r_addend += (relocation
                                - ea->vma
                                + elf_section_data (ea)->this_hdr.sh_offset);
              rel->r_info = ELF32_R_INFO (0, r_type);
            }
          emit_these_relocs = true;
          continue;
        }
      else if (is_ea_sym)
        unresolved_reloc = true;

      if (unresolved_reloc
          && _bfd_elf_section_offset (output_bfd, info, input_section,
                                      rel->r_offset) != (bfd_vma) -1)
        {
          _bfd_error_handler
            /* xgettext:c-format */
            (_("%pB(%s+%#" PRIx64 "): "
               "unresolvable %s relocation against symbol `%s'"),
             input_bfd,
             bfd_section_name (input_section),
             (uint64_t) rel->r_offset,
             howto->name,
             sym_name);
          ret = false;
        }

      r = _bfd_final_link_relocate (howto,
                                    input_bfd,
                                    input_section,
                                    contents,
                                    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
        {
          const char *msg = (const char *) 0;

          switch (r)
            {
            case bfd_reloc_overflow:
              (*info->callbacks->reloc_overflow)
                (info, (h ? &h->root : NULL), sym_name, howto->name,
                 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
              break;

            case bfd_reloc_undefined:
              (*info->callbacks->undefined_symbol)
                (info, sym_name, input_bfd, input_section,
                 rel->r_offset, true);
              break;

            case bfd_reloc_outofrange:
              msg = _("internal error: out of range error");
              goto common_error;

            case bfd_reloc_notsupported:
              msg = _("internal error: unsupported relocation error");
              goto common_error;

            case bfd_reloc_dangerous:
              msg = _("internal error: dangerous error");
              goto common_error;

            default:
              msg = _("internal error: unknown error");
              /* fall through */

            common_error:
              ret = false;
              (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
                                           input_section, rel->r_offset);
              break;
            }
        }
    }
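  /* Compact the special PPU relocs to the front of the reloc array and
     shrink the count so that only they reach the output file.  The
     return value of 2 below tells the generic ELF linker that these
     relocations must be written out even though we are not otherwise
     emitting relocations.  */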
  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
        {
          int r_type;

          r_type = ELF32_R_TYPE (rel->r_info);
          if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
            *wrel++ = *rel;
        }
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
static bool
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
                                 struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  return true;
}

/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
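/* Such symbols mark SPU entry points that may be reached from outside
   the image, so they are redirected to a stub in the always-resident
   stub section, which can load the proper overlay before jumping to
   the real definition.  */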
static int
spu_elf_output_symbol_hook (struct bfd_link_info *info,
                            const char *sym_name ATTRIBUTE_UNUSED,
                            Elf_Internal_Sym *sym,
                            asection *sym_sec ATTRIBUTE_UNUSED,
                            struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!bfd_link_relocatable (info)
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
          || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && startswith (h->root.root.string, "_SPUEAR_"))
    {
      struct got_entry *g;

      for (g = h->got.glist; g != NULL; g = g->next)
        if (htab->params->ovly_flavour == ovly_soft_icache
            ? g->br_addr == g->stub_addr
            : g->addend == 0 && g->ovl == 0)
          {
            sym->st_shndx = (_bfd_elf_section_from_bfd_section
                             (htab->stub_sec[0]->output_section->owner,
                              htab->stub_sec[0]->output_section));
            sym->st_value = g->stub_addr;
            break;
          }
    }

  return 1;
}
static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}

/* Set ELF header e_type for plugins.  */

static bool
spu_elf_init_file_header (bfd *abfd, struct bfd_link_info *info)
{
  if (!_bfd_elf_init_file_header (abfd, info))
    return false;

  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
  return true;
}

/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  int extra = 0;
  asection *sec;

  if (info != NULL)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      extra = htab->num_overlays;
    }
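  /* Each overlay gets a PT_LOAD of its own, and splitting overlays out
     of an existing segment in spu_elf_modify_segment_map can leave one
     further non-overlay segment behind, hence one extra.  */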
  if (extra)
    ++extra;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bool
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay, **first_load;
  unsigned int i;

  if (info == NULL)
    return true;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
        if ((s = m->sections[i]) == toe
            || spu_elf_section_data (s)->u.o.ovl_index != 0)
          {
            struct elf_segment_map *m2;
            bfd_vma amt;

            if (i + 1 < m->count)
              {
                amt = sizeof (struct elf_segment_map);
                amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
                m2 = bfd_zalloc (abfd, amt);
                if (m2 == NULL)
                  return false;
                m2->count = m->count - (i + 1);
                memcpy (m2->sections, m->sections + i + 1,
                        m2->count * sizeof (m->sections[0]));
                m2->p_type = PT_LOAD;
                m2->next = m->next;
                m->next = m2;
              }
            m->count = 1;
            if (i != 0)
              {
                m->count = i;
                amt = sizeof (struct elf_segment_map);
                m2 = bfd_zalloc (abfd, amt);
                if (m2 == NULL)
                  return false;
                m2->p_type = PT_LOAD;
                m2->count = 1;
                m2->sections[0] = s;
                m2->next = m->next;
                m->next = m2;
              }
            break;
          }

  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  m_overlay = NULL;
  first_load = NULL;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD)
        {
          if (!first_load)
            first_load = p;
          if ((*p)->count == 1
              && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
            {
              m = *p;
              m->no_sort_lma = 1;
              *p = m->next;
              *p_overlay = m;
              p_overlay = &m->next;
              continue;
            }
        }
      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  if (m_overlay != NULL)
    {
      p = first_load;
      if (*p != NULL && (*p)->p_type == PT_LOAD && (*p)->includes_filehdr)
        /* It doesn't really make sense for someone to include the ELF
           file header into an spu image, but if they do the code that
           assigns p_offset needs to see the segment containing the
           header first.  */
        p = &(*p)->next;
      *p_overlay = *p;
      *p = m_overlay;
    }

  return true;
}
/* Tweak the section type of .note.spu_name.  */

static bool
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
                       Elf_Internal_Shdr *hdr,
                       asection *sec)
{
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return true;
}
/* Tweak phdrs before writing them out.  */

static bool
spu_elf_modify_headers (bfd *abfd, struct bfd_link_info *info)
{
  if (info != NULL)
    {
      const struct elf_backend_data *bed;
      struct elf_obj_tdata *tdata;
      Elf_Internal_Phdr *phdr, *last;
      struct spu_link_hash_table *htab;
      unsigned int count;
      unsigned int i;

      bed = get_elf_backend_data (abfd);
      tdata = elf_tdata (abfd);
      phdr = tdata->phdr;
      count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
      htab = spu_hash_table (info);
      if (htab->num_overlays != 0)
        {
          struct elf_segment_map *m;
          unsigned int o;

          for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
            if (m->count != 0
                && ((o = spu_elf_section_data (m->sections[0])->u.o.ovl_index)
                    != 0))
              {
                /* Mark this as an overlay header.  */
                phdr[i].p_flags |= PF_OVERLAY;

                if (htab->ovtab != NULL && htab->ovtab->size != 0
                    && htab->params->ovly_flavour != ovly_soft_icache)
                  {
                    bfd_byte *p = htab->ovtab->contents;
                    unsigned int off = o * 16 + 8;

                    /* Write file_off into _ovly_table.  */
                    bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
                  }
              }
          /* Soft-icache has its file offset put in .ovl.init.  */
          if (htab->init != NULL && htab->init->size != 0)
            {
              bfd_vma val
                = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

              bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
            }
        }

      /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
         of 16.  This should always be possible when using the standard
         linker scripts, but don't create overlapping segments if
         someone is playing games with linker scripts.  */
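      /* Note that -x & 15 is the distance from x up to the next
         multiple of 16; for example, a p_filesz of 0x234 gives
         adjust = 12 and a padded size of 0x240.  */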
      last = NULL;
      for (i = count; i-- != 0; )
        if (phdr[i].p_type == PT_LOAD)
          {
            unsigned adjust;

            adjust = -phdr[i].p_filesz & 15;
            if (adjust != 0
                && last != NULL
                && (phdr[i].p_offset + phdr[i].p_filesz
                    > last->p_offset - adjust))
              break;

            adjust = -phdr[i].p_memsz & 15;
            if (adjust != 0
                && last != NULL
                && phdr[i].p_filesz != 0
                && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
                && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
              break;

            if (phdr[i].p_filesz != 0)
              last = &phdr[i];
          }

      if (i == (unsigned int) -1)
        for (i = count; i-- != 0; )
          if (phdr[i].p_type == PT_LOAD)
            {
              unsigned adjust;

              adjust = -phdr[i].p_filesz & 15;
              phdr[i].p_filesz += adjust;

              adjust = -phdr[i].p_memsz & 15;
              phdr[i].p_memsz += adjust;
            }
    }
  return _bfd_elf_modify_headers (abfd, info);
}
bool
spu_elf_size_sections (bfd *obfd ATTRIBUTE_UNUSED, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (htab->params->emit_fixups)
    {
      asection *sfixup = htab->sfixup;
      int fixup_count = 0;
      bfd *ibfd;
      size_t size;

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
        {
          asection *isec;

          if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
            continue;

          /* Walk over each section attached to the input bfd.  */
          for (isec = ibfd->sections; isec != NULL; isec = isec->next)
            {
              Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
              bfd_vma base_end;

              /* If there aren't any relocs, then there's nothing more
                 to do.  */
              if ((isec->flags & SEC_ALLOC) == 0
                  || (isec->flags & SEC_RELOC) == 0
                  || isec->reloc_count == 0)
                continue;

              /* Get the relocs.  */
              internal_relocs
                = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
                                             info->keep_memory);
              if (internal_relocs == NULL)
                return false;

              /* 1 quadword can contain up to 4 R_SPU_ADDR32
                 relocations.  They are stored in a single word by
                 saving the upper 28 bits of the address and setting the
                 lower 4 bits to a bit mask of the words that have the
                 relocation.  BASE_END keeps track of the next quadword.  */
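              /* This counts one record per quadword touched, the same
                 grouping spu_elf_emit_fixup uses at relocation time, so
                 the size computed below is always sufficient for the
                 records actually written.  */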
              irela = internal_relocs;
              irelaend = irela + isec->reloc_count;
              base_end = 0;
              for (; irela < irelaend; irela++)
                if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
                    && irela->r_offset >= base_end)
                  {
                    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
                    fixup_count++;
                  }
            }
        }

      /* We always have a NULL fixup as a sentinel.  */
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
      if (!bfd_set_section_size (sfixup, size))
        return false;
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
      if (sfixup->contents == NULL)
        return false;
    }
  return true;
}
#define TARGET_BIG_SYM		spu_elf32_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_TARGET_ID		SPU_ELF_DATA
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment needed for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_finish_dynamic_sections	spu_elf_finish_dynamic_sections
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define elf_backend_object_p			spu_elf_object_p
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_headers		spu_elf_modify_headers
#define elf_backend_init_file_header		spu_elf_init_file_header
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

#include "elf32-target.h"