infrun.c
/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "defs.h"
#include "displaced-stepping.h"
#include "infrun.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "breakpoint.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "target.h"
#include "target-connection.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observable.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "block.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "record-full.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"
#include "skip.h"
#include "probe.h"
#include "objfiles.h"
#include "completer.h"
#include "target-descriptions.h"
#include "target-dcache.h"
#include "terminal.h"
#include "solist.h"
#include "gdbsupport/event-loop.h"
#include "thread-fsm.h"
#include "gdbsupport/enum-flags.h"
#include "progspace-and-thread.h"
#include "gdbsupport/gdb_optional.h"
#include "arch-utils.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/forward-scope-exit.h"
#include "gdbsupport/gdb_select.h"
#include <unordered_map>
#include "async-event.h"
#include "gdbsupport/selftest.h"
#include "scoped-mock-context.h"
#include "test-target.h"
#include "gdbsupport/common-debug.h"
#include "gdbsupport/buildargv.h"
/* Prototypes for local functions.  */

static void sig_print_info (enum gdb_signal);

static void sig_print_header (void);

static void follow_inferior_reset_breakpoints (void);

static bool currently_stepping (struct thread_info *tp);

static void insert_hp_step_resume_breakpoint_at_frame (struct frame_info *);

static void insert_step_resume_breakpoint_at_caller (struct frame_info *);

static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);

static bool maybe_software_singlestep (struct gdbarch *gdbarch);

static void resume (gdb_signal sig);

static void wait_for_inferior (inferior *inf);

static void restart_threads (struct thread_info *event_thread,
			     inferior *inf = nullptr);

static bool start_step_over (void);

/* Asynchronous signal handler registered as event loop source for
   when we have pending events ready to be passed to the core.  */
static struct async_event_handler *infrun_async_inferior_event_token;

/* Stores whether infrun_async was previously enabled or disabled.
   Starts off as -1, indicating "never enabled/disabled".  */
static int infrun_is_async = -1;
/* See infrun.h.  */

void
infrun_async (int enable)
{
  if (infrun_is_async != enable)
    {
      infrun_is_async = enable;

      infrun_debug_printf ("enable=%d", enable);

      if (enable)
	mark_async_event_handler (infrun_async_inferior_event_token);
      else
	clear_async_event_handler (infrun_async_inferior_event_token);
    }
}
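
/* Illustrative sketch (editor's note, not part of the original
   source): target back ends call infrun_async to tell the event loop
   whether infrun has events worth processing.  A hypothetical caller
   might do:

     infrun_async (1);   // events pending: mark the handler so the
			 // event loop runs infrun's callback soon
     ...
     infrun_async (0);   // quiesce: clear the handler again

   The guard on infrun_is_async above makes repeated calls with the
   same value cheap no-ops.  */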

/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  mark_async_event_handler (infrun_async_inferior_event_token);
}

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such functions.  */
bool step_stop_if_no_debug = false;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
}

/* proceed and normal_stop use this to notify the user when the
   inferior stopped in a different thread than it had been running
   in.  */
static ptid_t previous_inferior_ptid;

/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */
static bool detach_fork = true;

bool debug_infrun = false;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Inferior debugging is %s.\n"), value);
}

/* Support for disabling address space randomization.  */

bool disable_randomization = true;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  if (target_supports_disable_randomization ())
    gdb_printf (file,
		_("Disabling randomization of debuggee's "
		  "virtual address space is %s.\n"),
		value);
  else
    gdb_puts (_("Disabling randomization of debuggee's "
		"virtual address space is unsupported on\n"
		"this platform.\n"), file);
}

static void
set_disable_randomization (const char *args, int from_tty,
			   struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
	     "virtual address space is unsupported on\n"
	     "this platform."));
}
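
/* Usage sketch (illustrative, not in the original file): this pair
   backs the user-visible setting

     (gdb) set disable-randomization off
     (gdb) show disable-randomization

   set_disable_randomization rejects the change early on targets where
   target_supports_disable_randomization () is false, so the stored
   value only changes where the target can honor it.  */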

/* User interface for non-stop mode.  */

bool non_stop = false;
static bool non_stop_1 = false;

static void
set_non_stop (const char *args, int from_tty,
	      struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}

static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Controlling the inferior in non-stop mode is %s.\n"),
	      value);
}
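
/* Usage sketch (illustrative): non_stop_1 is the staging variable for

     (gdb) set non-stop on

   set_non_stop copies it into non_stop only when no inferior is
   running; otherwise it restores non_stop_1 and raises an error, so
   the "show" value never drifts from the effective one.  */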

/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

static bool observer_mode = false;
static bool observer_mode_1 = false;

static void
set_observer_mode (const char *args, int from_tty,
		   struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;

  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;

  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = 0;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    gdb_printf (_("Observer mode is now %s.\n"),
		(observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  bool newval = (!may_insert_breakpoints
		 && !may_insert_tracepoints
		 && may_insert_fast_tracepoints
		 && !may_stop
		 && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    gdb_printf (_("Observer mode is now %s.\n"),
		(newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}
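
/* Illustration (editor's assumption about intended use, not original
   text): update_observer_mode recomputes observer mode from the
   individual permission flags, so a sequence like

     (gdb) set may-insert-breakpoints off
     (gdb) set may-insert-tracepoints off
     (gdb) set may-stop off

   in non-stop mode with fast tracepoints allowed would report
   "Observer mode is now on." without the user touching the
   observer-mode setting directly.  */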

/* Tables of how to react to signals; the user sets them.  */

static unsigned char signal_stop[GDB_SIGNAL_LAST];
static unsigned char signal_print[GDB_SIGNAL_LAST];
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
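
/* Usage sketch (hypothetical call site, shown for illustration only):
   SET_SIGS/UNSET_SIGS fold a caller-supplied signal selection into
   one of the flag tables above, e.g.:

     unsigned char sigs[GDB_SIGNAL_LAST];  // 1 for each selected signal
     ...
     SET_SIGS (GDB_SIGNAL_LAST, sigs, signal_stop);      // stop on them
     UNSET_SIGS (GDB_SIGNAL_LAST, sigs, signal_program); // don't pass them

   Both macros leave entries for unselected signals untouched.  */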

/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  target_program_signals (signal_program);
}

/* Value to pass to target_resume() to cause all threads to resume.  */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;

/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
			  int from_tty, struct cmd_list_element *c)
{
  update_solib_breakpoints ();
}

static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Stopping for shared library events is %s.\n"),
	      value);
}
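
/* Usage note (illustrative): this implements

     (gdb) set stop-on-solib-events 1

   set_stop_on_solib_events only has to refresh the shared-library
   event breakpoints; the flag itself is stored by the generic
   set-command machinery before this hook runs.  */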

/* True after stop if current stack frame should be printed.  */

static bool stop_print_frame;

/* This is a cached copy of the target/ptid/waitstatus of the last
   event returned by target_wait().
   This information is returned by get_last_target_status().  */
static process_stratum_target *target_last_proc_target;
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

void init_thread_stepping_state (struct thread_info *tss);

static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *const follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Debugger response to a program "
		"call of fork or vfork is \"%s\".\n"),
	      value);
}
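
/* Usage note (illustrative): follow_fork_mode_string backs

     (gdb) set follow-fork-mode child
     (gdb) set follow-fork-mode parent

   and is consulted when a fork or vfork event arrives to decide which
   branch GDB stays attached to (see follow_fork_inferior below).  */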
  325. /* Handle changes to the inferior list based on the type of fork,
  326. which process is being followed, and whether the other process
  327. should be detached. On entry inferior_ptid must be the ptid of
  328. the fork parent. At return inferior_ptid is the ptid of the
  329. followed inferior. */
  330. static bool
  331. follow_fork_inferior (bool follow_child, bool detach_fork)
  332. {
  333. target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
  334. gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
  335. || fork_kind == TARGET_WAITKIND_VFORKED);
  336. bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
  337. ptid_t parent_ptid = inferior_ptid;
  338. ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();
  339. if (has_vforked
  340. && !non_stop /* Non-stop always resumes both branches. */
  341. && current_ui->prompt_state == PROMPT_BLOCKED
  342. && !(follow_child || detach_fork || sched_multi))
  343. {
  344. /* The parent stays blocked inside the vfork syscall until the
  345. child execs or exits. If we don't let the child run, then
  346. the parent stays blocked. If we're telling the parent to run
  347. in the foreground, the user will not be able to ctrl-c to get
  348. back the terminal, effectively hanging the debug session. */
  349. gdb_printf (gdb_stderr, _("\
  350. Can not resume the parent process over vfork in the foreground while\n\
  351. holding the child stopped. Try \"set detach-on-fork\" or \
  352. \"set schedule-multiple\".\n"));
  353. return true;
  354. }
  355. inferior *parent_inf = current_inferior ();
  356. inferior *child_inf = nullptr;
  357. gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);
  358. if (!follow_child)
  359. {
  360. /* Detach new forked process? */
  361. if (detach_fork)
  362. {
  363. /* Before detaching from the child, remove all breakpoints
  364. from it. If we forked, then this has already been taken
  365. care of by infrun.c. If we vforked however, any
  366. breakpoint inserted in the parent is visible in the
  367. child, even those added while stopped in a vfork
  368. catchpoint. This will remove the breakpoints from the
  369. parent also, but they'll be reinserted below. */
  370. if (has_vforked)
  371. {
  372. /* Keep breakpoints list in sync. */
  373. remove_breakpoints_inf (current_inferior ());
  374. }
  375. if (print_inferior_events)
  376. {
  377. /* Ensure that we have a process ptid. */
  378. ptid_t process_ptid = ptid_t (child_ptid.pid ());
  379. target_terminal::ours_for_output ();
  380. gdb_printf (_("[Detaching after %s from child %s]\n"),
  381. has_vforked ? "vfork" : "fork",
  382. target_pid_to_str (process_ptid).c_str ());
  383. }
  384. }
  385. else
  386. {
  387. /* Add process to GDB's tables. */
  388. child_inf = add_inferior (child_ptid.pid ());
  389. child_inf->attach_flag = parent_inf->attach_flag;
  390. copy_terminal_info (child_inf, parent_inf);
  391. child_inf->gdbarch = parent_inf->gdbarch;
  392. copy_inferior_target_desc_info (child_inf, parent_inf);
  393. child_inf->symfile_flags = SYMFILE_NO_READ;
  394. /* If this is a vfork child, then the address-space is
  395. shared with the parent. */
  396. if (has_vforked)
  397. {
  398. child_inf->pspace = parent_inf->pspace;
  399. child_inf->aspace = parent_inf->aspace;
  400. exec_on_vfork (child_inf);
  401. /* The parent will be frozen until the child is done
  402. with the shared region. Keep track of the
  403. parent. */
  404. child_inf->vfork_parent = parent_inf;
  405. child_inf->pending_detach = 0;
  406. parent_inf->vfork_child = child_inf;
  407. parent_inf->pending_detach = 0;
  408. }
  409. else
  410. {
  411. child_inf->aspace = new_address_space ();
  412. child_inf->pspace = new program_space (child_inf->aspace);
  413. child_inf->removable = 1;
  414. clone_program_space (child_inf->pspace, parent_inf->pspace);
  415. }
  416. }
  417. if (has_vforked)
  418. {
  419. /* If we detached from the child, then we have to be careful
  420. to not insert breakpoints in the parent until the child
  421. is done with the shared memory region. However, if we're
  422. staying attached to the child, then we can and should
  423. insert breakpoints, so that we can debug it. A
  424. subsequent child exec or exit is enough to know when does
  425. the child stops using the parent's address space. */
  426. parent_inf->thread_waiting_for_vfork_done
  427. = detach_fork ? inferior_thread () : nullptr;
  428. parent_inf->pspace->breakpoints_not_allowed = detach_fork;
  429. }
  430. }
  else
    {
      /* Follow the child.  */

      if (print_inferior_events)
        {
          std::string parent_pid = target_pid_to_str (parent_ptid);
          std::string child_pid = target_pid_to_str (child_ptid);

          target_terminal::ours_for_output ();
          gdb_printf (_("[Attaching after %s %s to child %s]\n"),
                      parent_pid.c_str (),
                      has_vforked ? "vfork" : "fork",
                      child_pid.c_str ());
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */
      child_inf = add_inferior (child_ptid.pid ());

      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->gdbarch = parent_inf->gdbarch;
      copy_inferior_target_desc_info (child_inf, parent_inf);

      if (has_vforked)
        {
          /* If this is a vfork child, then the address-space is shared
             with the parent.  */
          child_inf->aspace = parent_inf->aspace;
          child_inf->pspace = parent_inf->pspace;

          exec_on_vfork (child_inf);
        }
      else if (detach_fork)
        {
          /* We follow the child and detach from the parent: move the parent's
             program space to the child.  This simplifies some things, like
             doing "next" over fork() and landing on the expected line in the
             child (note, that is broken with "set detach-on-fork off").

             Before assigning brand new spaces for the parent, remove
             breakpoints from it: because the new pspace won't match
             currently inserted locations, the normal detach procedure
             wouldn't remove them, and we would leave them inserted when
             detaching.  */
          remove_breakpoints_inf (parent_inf);

          child_inf->aspace = parent_inf->aspace;
          child_inf->pspace = parent_inf->pspace;
          parent_inf->aspace = new_address_space ();
          parent_inf->pspace = new program_space (parent_inf->aspace);
          clone_program_space (parent_inf->pspace, child_inf->pspace);

          /* The parent inferior is still the current one, so keep things
             in sync.  */
          set_current_program_space (parent_inf->pspace);
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = new program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (child_inf->pspace, parent_inf->pspace);
        }
    }
  gdb_assert (current_inferior () == parent_inf);

  /* If we are setting up an inferior for the child, target_follow_fork is
     responsible for pushing the appropriate targets on the new inferior's
     target stack and adding the initial thread (with ptid CHILD_PTID).

     If we are not setting up an inferior for the child (because following
     the parent and detach_fork is true), it is responsible for detaching
     from CHILD_PTID.  */
  target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
                      detach_fork);

  /* target_follow_fork must leave the parent as the current inferior.  If we
     want to follow the child, we make it the current one below.  */
  gdb_assert (current_inferior () == parent_inf);

  /* If there is a child inferior, target_follow_fork must have created a
     thread for it.  */
  if (child_inf != nullptr)
    gdb_assert (!child_inf->thread_list.empty ());

  /* Clear the parent thread's pending follow field.  Do this before calling
     target_detach, so that the target can differentiate the two following
     cases:

     - We continue past a fork with "follow-fork-mode == child" &&
       "detach-on-fork on", and therefore detach the parent.  In that
       case the target should not detach the fork child.
     - We run to a fork catchpoint and the user types "detach".  In that
       case, the target should detach the fork child in addition to the
       parent.

     The former case will have pending_follow cleared, the latter will have
     pending_follow set.  */
  thread_info *parent_thread = find_thread_ptid (parent_inf, parent_ptid);
  gdb_assert (parent_thread != nullptr);
  parent_thread->pending_follow.set_spurious ();
  /* Detach the parent if needed.  */
  if (follow_child)
    {
      /* If we're vforking, we want to hold on to the parent until
         the child exits or execs.  At child exec or exit time we
         can remove the old breakpoints from the parent and detach
         or resume debugging it.  Otherwise, detach the parent now;
         we'll want to reuse its program/address spaces, but we
         can't set them to the child before removing breakpoints
         from the parent, otherwise, the breakpoints module could
         decide to remove breakpoints from the wrong process (since
         they'd be assigned to the same address space).  */

      if (has_vforked)
        {
          gdb_assert (child_inf->vfork_parent == NULL);
          gdb_assert (parent_inf->vfork_child == NULL);
          child_inf->vfork_parent = parent_inf;
          child_inf->pending_detach = 0;
          parent_inf->vfork_child = child_inf;
          parent_inf->pending_detach = detach_fork;
        }
      else if (detach_fork)
        {
          if (print_inferior_events)
            {
              /* Ensure that we have a process ptid.  */
              ptid_t process_ptid = ptid_t (parent_ptid.pid ());

              target_terminal::ours_for_output ();
              gdb_printf (_("[Detaching after fork from "
                            "parent %s]\n"),
                          target_pid_to_str (process_ptid).c_str ());
            }

          target_detach (parent_inf, 0);
        }
    }

  /* If we ended up creating a new inferior, call post_create_inferior to
     inform the various subcomponents.  */
  if (child_inf != nullptr)
    {
      /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
         (do not restore the parent as the current inferior).  */
      gdb::optional<scoped_restore_current_thread> maybe_restore;

      if (!follow_child)
        maybe_restore.emplace ();

      switch_to_thread (*child_inf->threads ().begin ());
      post_create_inferior (0);
    }

  return false;
}
/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static bool
follow_fork ()
{
  bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  bool should_resume = true;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  struct breakpoint *exception_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  int current_line = 0;
  symtab *current_symtab = NULL;
  struct frame_id step_frame_id = { 0 };

  if (!non_stop)
    {
      process_stratum_target *wait_target;
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_target, &wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
         do.  */
      if (wait_status.kind () != TARGET_WAITKIND_FORKED
          && wait_status.kind () != TARGET_WAITKIND_VFORKED)
        return true;

      /* Check if we switched over from WAIT_PTID, since the event was
         reported.  */
      if (wait_ptid != minus_one_ptid
          && (current_inferior ()->process_target () != wait_target
              || inferior_ptid != wait_ptid))
        {
          /* We did.  Switch back to WAIT_PTID thread, to tell the
             target to follow it (in either direction).  We'll
             afterwards refuse to resume, and inform the user what
             happened.  */
          thread_info *wait_thread = find_thread_ptid (wait_target, wait_ptid);
          switch_to_thread (wait_thread);
          should_resume = false;
        }
    }
  thread_info *tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind ())
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
        ptid_t parent, child;
        std::unique_ptr<struct thread_fsm> thread_fsm;

        /* If the user did a next/step, etc, over a fork call,
           preserve the stepping state in the fork child.  */
        if (follow_child && should_resume)
          {
            step_resume_breakpoint = clone_momentary_breakpoint
              (tp->control.step_resume_breakpoint);
            step_range_start = tp->control.step_range_start;
            step_range_end = tp->control.step_range_end;
            current_line = tp->current_line;
            current_symtab = tp->current_symtab;
            step_frame_id = tp->control.step_frame_id;
            exception_resume_breakpoint
              = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
            thread_fsm = tp->release_thread_fsm ();

            /* For now, delete the parent's sr breakpoint, otherwise,
               parent/child sr breakpoints are considered duplicates,
               and the child version will not be installed.  Remove
               this when the breakpoints module becomes aware of
               inferiors and address spaces.  */
            delete_step_resume_breakpoint (tp);
            tp->control.step_range_start = 0;
            tp->control.step_range_end = 0;
            tp->control.step_frame_id = null_frame_id;
            delete_exception_resume_breakpoint (tp);
          }

        parent = inferior_ptid;
        child = tp->pending_follow.child_ptid ();

        /* If handling a vfork, stop all the inferior's threads, they will be
           restarted when the vfork shared region is complete.  */
        if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
            && target_is_non_stop_p ())
          stop_all_threads ("handling vfork", tp->inf);

        process_stratum_target *parent_targ = tp->inf->process_target ();

        /* Set up inferior(s) as specified by the caller, and tell the
           target to do whatever is necessary to follow either parent
           or child.  */
        if (follow_fork_inferior (follow_child, detach_fork))
          {
            /* Target refused to follow, or there's some other reason
               we shouldn't resume.  */
            should_resume = false;
          }
        else
          {
            /* This makes sure we don't try to apply the "Switched
               over from WAIT_PTID" logic above.  */
            nullify_last_target_wait_ptid ();

            /* If we followed the child, switch to it...  */
            if (follow_child)
              {
                thread_info *child_thr = find_thread_ptid (parent_targ, child);
                switch_to_thread (child_thr);

                /* ... and preserve the stepping state, in case the
                   user was stepping over the fork call.  */
                if (should_resume)
                  {
                    tp = inferior_thread ();
                    tp->control.step_resume_breakpoint
                      = step_resume_breakpoint;
                    tp->control.step_range_start = step_range_start;
                    tp->control.step_range_end = step_range_end;
                    tp->current_line = current_line;
                    tp->current_symtab = current_symtab;
                    tp->control.step_frame_id = step_frame_id;
                    tp->control.exception_resume_breakpoint
                      = exception_resume_breakpoint;
                    tp->set_thread_fsm (std::move (thread_fsm));
                  }
                else
                  {
                    /* If we get here, it was because we're trying to
                       resume from a fork catchpoint, but, the user
                       has switched threads away from the thread that
                       forked.  In that case, the resume command
                       issued is most likely not applicable to the
                       child, so just warn, and refuse to resume.  */
                    warning (_("Not resuming: switched threads "
                               "before following fork child."));
                  }

                /* Reset breakpoints in the child as appropriate.  */
                follow_inferior_reset_breakpoints ();
              }
          }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
                      "Unexpected pending_follow.kind %d\n",
                      tp->pending_follow.kind ());
      break;
    }

  return should_resume;
}
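
/* After following a fork to the child, re-target the per-thread
   step-resume and exception-resume breakpoints at the new thread, and
   resync the inserted breakpoints with the breakpoint list.  */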
static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->loc->enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->loc->enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */
  breakpoint_re_set ();
  insert_breakpoints ();
}
/* The child has exited or execed: resume THREAD, a thread of the parent,
   if it was meant to be executing.  */

static void
proceed_after_vfork_done (thread_info *thread)
{
  if (thread->state == THREAD_RUNNING
      && !thread->executing ()
      && !thread->stop_requested
      && thread->stop_signal () == GDB_SIGNAL_0)
    {
      infrun_debug_printf ("resuming vfork parent thread %s",
                           thread->ptid.to_string ().c_str ());
      switch_to_thread (thread);
      clear_proceed_status (0);
      proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
    }
}
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      inferior *resume_parent = nullptr;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = NULL;
      inf->vfork_parent = NULL;

      /* If the user wanted to detach from the parent, now is the
         time.  */
      if (vfork_parent->pending_detach)
        {
          struct program_space *pspace;
          struct address_space *aspace;

          /* follow-fork child, detach-on-fork on.  */
          vfork_parent->pending_detach = 0;

          scoped_restore_current_pspace_and_thread restore_thread;

          /* We're letting go of the parent.  */
          thread_info *tp = any_live_thread_of_inferior (vfork_parent);
          switch_to_thread (tp);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module, being unaware of this, would
             likely choose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          aspace = inf->aspace;
          inf->aspace = NULL;
          inf->pspace = NULL;

          if (print_inferior_events)
            {
              std::string pidstr
                = target_pid_to_str (ptid_t (vfork_parent->pid));

              target_terminal::ours_for_output ();

              if (exec)
                {
                  gdb_printf (_("[Detaching vfork parent %s "
                                "after child exec]\n"), pidstr.c_str ());
                }
              else
                {
                  gdb_printf (_("[Detaching vfork parent %s "
                                "after child exit]\n"), pidstr.c_str ());
                }
            }

          target_detach (vfork_parent, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = 1;
          set_current_program_space (inf->pspace);

          resume_parent = vfork_parent;
        }
      else
        {
          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          /* Switch to no-thread while running clone_program_space, so
             that clone_program_space doesn't want to read the
             selected frame of a dead process.  */
          scoped_restore_current_thread restore_thread;
          switch_to_no_thread ();

          inf->pspace = new program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          set_current_program_space (inf->pspace);
          inf->removable = 1;
          inf->symfile_flags = SYMFILE_NO_READ;
          clone_program_space (inf->pspace, vfork_parent->pspace);

          resume_parent = vfork_parent;
        }

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != nullptr)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          scoped_restore_current_thread restore_thread;

          infrun_debug_printf ("resuming vfork parent process %d",
                               resume_parent->pid);

          for (thread_info *thread : resume_parent->threads ())
            proceed_after_vfork_done (thread);
        }
    }
}
/* Handle TARGET_WAITKIND_VFORK_DONE.  */

static void
handle_vfork_done (thread_info *event_thread)
{
  /* We only care about this event if inferior::thread_waiting_for_vfork_done
     is set, that is if we are waiting for a vfork child not under our control
     (because we detached it) to exec or exit.

     If an inferior has vforked and we are debugging the child, we don't use
     the vfork-done event to get notified about the end of the shared address
     space window.  We rely instead on the child's exec or exit event, and the
     inferior::vfork_{parent,child} fields are used instead.  See
     handle_vfork_child_exec_or_exit for that.  */
  if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
    {
      infrun_debug_printf ("not waiting for a vfork-done event");
      return;
    }

  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* We stopped all threads (other than the vforking thread) of the inferior
     in follow_fork and kept them stopped until now.  It should therefore not
     be possible for another thread to have reported a vfork during that
     window.  If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same
     thread whose vfork-done we are handling right now.  */
  gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);

  event_thread->inf->thread_waiting_for_vfork_done = nullptr;
  event_thread->inf->pspace->breakpoints_not_allowed = 0;

  /* On non-stop targets, we stopped all the inferior's threads in
     follow_fork; resume them now.  On all-stop targets, everything that needs
     to be resumed will be when we resume the event thread.  */
  if (target_is_non_stop_p ())
    {
      /* restart_threads and start_step_over may change the current thread;
         make sure we leave the event thread as the current thread.  */
      scoped_restore_current_thread restore_thread;

      insert_breakpoints ();
      restart_threads (event_thread, event_thread->inf);
      start_step_over ();
    }
}
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};
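
/* The current "follow-exec-mode" setting; defaults to "same".  */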
static const char *follow_exec_mode_string = follow_exec_mode_same;
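
/* Implement the "show follow-exec-mode" command.  */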
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
}
/* EXEC_FILE_TARGET is assumed to be non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote); or on native Linux in non-stop mode if
     there were only two threads in the inferior and the non-leader
     one is the one that execs (and nothing forces an update of the
     thread list up to here).  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but the one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = NULL;
  th->control.exception_resume_breakpoint = NULL;
  th->control.single_step_breakpoints = NULL;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;
  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  gdb_printf (_("%s is executing new program: %s\n"),
              target_pid_to_str (process_ptid).c_str (),
              exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */
  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, NULL);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == NULL)
    warning (_("Could not load symbols for executable %s.\n"
               "Do you need \"set sysroot\"?"),
             exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  struct inferior *inf = current_inferior ();

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
         inferior's pid.  Having two inferiors with the same pid would confuse
         find_inferior_p(t)id.  Transfer the terminal state and info from the
         old to the new inferior.  */
      inferior *new_inferior = add_inferior_with_spaces ();

      swap_terminal_info (new_inferior, inf);
      exit_inferior_silent (inf);

      new_inferior->pid = pid;
      target_follow_exec (new_inferior, ptid, exec_file_target);

      /* We continue with the new inferior.  */
      inf = new_inferior;
    }
  else
    {
      /* The old description may no longer be fit for the new image.
         E.g., a 64-bit process exec'ed a 32-bit process.  Clear the
         old description; we'll read a new one below.  No need to do
         this on "follow-exec-mode new", as the old inferior stays
         around (its description is later cleared/refetched on
         restart).  */
      target_clear_description ();
      target_follow_exec (inf, ptid, exec_file_target);
    }

  gdb_assert (current_inferior () == inf);
  gdb_assert (current_program_space == inf->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), inf, SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  gdb::observers::inferior_execd.notify (inf);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll automatically
     get reset there in the new process.)  */
}
/* The chain of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
thread_step_over_list global_thread_step_over_list;

/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
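
/* A thread may need to step over both kinds at once, e.g.
   (STEP_OVER_BREAKPOINT | STEP_OVER_WATCHPOINT); DEF_ENUM_FLAGS_TYPE
   above makes such bitwise combinations type-safe.  */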
/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace = nullptr;
  CORE_ADDR address = 0;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p = 0;

  /* The thread's global number.  */
  int thread = -1;
};

/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given threads that can't run code in the same address space as the
   breakpoint's can't really miss the breakpoint, GDB could be taught
   to step-over at most one breakpoint per address space (so this info
   could move to the address space object if/when GDB is extended).

   The set of breakpoints being stepped over will normally be much
   smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;
/* Record the address of the breakpoint/instruction we're currently
   stepping over.
   N.B. We record the aspace and address now, instead of say just the thread,
   because when we need the info later the thread may be running.  */

static void
set_step_over_info (const address_space *aspace, CORE_ADDR address,
                    int nonsteppable_watchpoint_p,
                    int thread)
{
  step_over_info.aspace = aspace;
  step_over_info.address = address;
  step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
  step_over_info.thread = thread;
}

/* Called when we're no longer stepping over a breakpoint / an
   instruction, so all breakpoints are free to be (re)inserted.  */

static void
clear_step_over_info (void)
{
  infrun_debug_printf ("clearing step over info");
  step_over_info.aspace = NULL;
  step_over_info.address = 0;
  step_over_info.nonsteppable_watchpoint_p = 0;
  step_over_info.thread = -1;
}
/* See infrun.h.  */

int
stepping_past_instruction_at (struct address_space *aspace,
                              CORE_ADDR address)
{
  return (step_over_info.aspace != NULL
          && breakpoint_address_match (aspace, address,
                                       step_over_info.aspace,
                                       step_over_info.address));
}

/* See infrun.h.  */

int
thread_is_stepping_over_breakpoint (int thread)
{
  return (step_over_info.thread != -1
          && thread == step_over_info.thread);
}

/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  return step_over_info.nonsteppable_watchpoint_p;
}

/* Returns true if step-over info is valid.  */

static bool
step_over_info_valid_p (void)
{
  return (step_over_info.aspace != NULL
          || stepping_past_nonsteppable_watchpoint ());
}
/* Displaced stepping.  */

/* In non-stop debugging mode, we must take special care to manage
   breakpoints properly; in particular, the traditional strategy for
   stepping a thread past a breakpoint it has hit is unsuitable.
   'Displaced stepping' is a tactic for stepping one thread past a
   breakpoint it has hit while ensuring that other threads running
   concurrently will hit the breakpoint as they should.

   The traditional way to step a thread T off a breakpoint in a
   multi-threaded program in all-stop mode is as follows:

   a0) Initially, all threads are stopped, and breakpoints are not
       inserted.
   a1) We single-step T, leaving breakpoints uninserted.
   a2) We insert breakpoints, and resume all threads.

   In non-stop debugging, however, this strategy is unsuitable: we
   don't want to have to stop all threads in the system in order to
   continue or step T past a breakpoint.  Instead, we use displaced
   stepping:

   n0) Initially, T is stopped, other threads are running, and
       breakpoints are inserted.
   n1) We copy the instruction "under" the breakpoint to a separate
       location, outside the main code stream, making any adjustments
       to the instruction, register, and memory state as directed by
       T's architecture.
   n2) We single-step T over the instruction at its new location.
   n3) We adjust the resulting register and memory state as directed
       by T's architecture.  This includes resetting T's PC to point
       back into the main instruction stream.
   n4) We resume T.

   This approach depends on the following gdbarch methods:

   - gdbarch_max_insn_length and gdbarch_displaced_step_location
     indicate where to copy the instruction, and how much space must
     be reserved there.  We use these in step n1.

   - gdbarch_displaced_step_copy_insn copies an instruction to a new
     address, and makes any necessary adjustments to the instruction,
     register contents, and memory.  We use this in step n1.

   - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
     same effect the instruction would have had if we had executed it
     at its original address.  We use this in step n3.

   The gdbarch_displaced_step_copy_insn and
   gdbarch_displaced_step_fixup functions must be written so that
   copying an instruction with gdbarch_displaced_step_copy_insn,
   single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
   thread's memory and registers as stepping the instruction in place
   would have.  Exactly which responsibilities fall to the copy and
   which fall to the fixup is up to the author of those functions.

   See the comments in gdbarch.sh for details.

   Note that displaced stepping and software single-step cannot
   currently be used in combination, although with some care I think
   they could be made to.  Software single-step works by placing
   breakpoints on all possible subsequent instructions; if the
   displaced instruction is a PC-relative jump, those breakpoints
   could fall in very strange places --- on pages that aren't
   executable, or at addresses that are not proper instruction
   boundaries.  (We do generally let other threads run while we wait
   to hit the software single-step breakpoint, and they might
   encounter such a corrupted instruction.)  One way to work around
   this would be to have gdbarch_displaced_step_copy_insn fully
   simulate the effect of PC-relative instructions (and return NULL)
   on architectures that use software single-stepping.

   In non-stop mode, we can have independent and simultaneous step
   requests, so more than one thread may need to simultaneously step
   over a breakpoint.  The current implementation assumes there is
   only one scratch space per process.  In this case, we have to
   serialize access to the scratch space.  If thread A wants to step
   over a breakpoint, but we are currently waiting for some other
   thread to complete a displaced step, we leave thread A stopped and
   place it in the displaced_step_request_queue.  Whenever a displaced
   step finishes, we pick the next thread in the queue and start a new
   displaced step operation on it.  See displaced_step_prepare and
   displaced_step_finish for details.  */
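
/* For orientation only, a minimal sketch (not code that appears
   elsewhere in this file; error handling, queueing and thread
   bookkeeping elided, and SIG is assumed to be the signal to deliver)
   of how steps n1..n4 map onto the gdbarch hooks used below:

     CORE_ADDR displaced_pc;
     status = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);
        => n1: the instruction is copied out of line.
     ... single-step TP and wait for it to report a stop ...
        => n2.
     gdbarch_displaced_step_finish (gdbarch, tp, sig);
        => n3: registers/memory are fixed up as if stepped in place.
     ... resume TP normally ...
        => n4.

   See displaced_step_prepare_throw and displaced_step_finish below for
   the real flow.  */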
/* Return true if THREAD is doing a displaced step.  */

static bool
displaced_step_in_progress_thread (thread_info *thread)
{
  gdb_assert (thread != NULL);

  return thread->displaced_step_state.in_progress ();
}

/* Return true if INF has a thread doing a displaced step.  */

static bool
displaced_step_in_progress (inferior *inf)
{
  return inf->displaced_step_state.in_progress_count > 0;
}

/* Return true if any thread is doing a displaced step.  */

static bool
displaced_step_in_progress_any_thread ()
{
  for (inferior *inf : all_non_exited_inferiors ())
    {
      if (displaced_step_in_progress (inf))
        return true;
    }

  return false;
}
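
/* Called when inferior INF exits: discard its displaced-stepping state
   and forget any thread waiting for a vfork-done event.  */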
static void
infrun_inferior_exit (struct inferior *inf)
{
  inf->displaced_step_state.reset ();
  inf->thread_waiting_for_vfork_done = nullptr;
}
static void
infrun_inferior_execd (inferior *inf)
{
  /* If some threads were doing a displaced step in this inferior at the
     moment of the exec, they no longer exist.  Even if the exec'ing thread
     was doing a displaced step, we don't want to do any fixup nor restore
     displaced stepping buffer bytes.  */
  inf->displaced_step_state.reset ();

  for (thread_info *thread : inf->threads ())
    thread->displaced_step_state.reset ();

  /* Since an in-line step is done with everything else stopped, if there was
     one in progress at the time of the exec, it must have been the exec'ing
     thread.  */
  clear_step_over_info ();

  inf->thread_waiting_for_vfork_done = nullptr;
}
/* If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   whether the target works in a non-stop way (see use_displaced_stepping).  */

static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
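
/* Implement the "show displaced-stepping" command.  */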
static void
show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
                                 struct cmd_list_element *c,
                                 const char *value)
{
  if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
    gdb_printf (file,
                _("Debugger's willingness to use displaced stepping "
                  "to step over breakpoints is %s (currently %s).\n"),
                value, target_is_non_stop_p () ? "on" : "off");
  else
    gdb_printf (file,
                _("Debugger's willingness to use displaced stepping "
                  "to step over breakpoints is %s.\n"), value);
}
/* Return true if the gdbarch implements the required methods to use
   displaced stepping.  */

static bool
gdbarch_supports_displaced_stepping (gdbarch *arch)
{
  /* Only check for the presence of `prepare`.  The gdbarch verification
     ensures that if `prepare` is provided, so is `finish`.  */
  return gdbarch_displaced_step_prepare_p (arch);
}
/* Return true if displaced stepping can/should be used to step
   over breakpoints of thread TP.  */

static bool
use_displaced_stepping (thread_info *tp)
{
  /* If the user disabled it explicitly, don't use displaced stepping.  */
  if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
    return false;

  /* If "auto", only use displaced stepping if the target operates in a
     non-stop way.  */
  if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
      && !target_is_non_stop_p ())
    return false;

  gdbarch *gdbarch = get_thread_regcache (tp)->arch ();

  /* If the architecture doesn't implement displaced stepping, don't use
     it.  */
  if (!gdbarch_supports_displaced_stepping (gdbarch))
    return false;

  /* If recording, don't use displaced stepping.  */
  if (find_record_target () != nullptr)
    return false;

  /* If displaced stepping failed before for this inferior, don't bother
     trying again.  */
  if (tp->inf->displaced_step_state.failed_before)
    return false;

  return true;
}
/* Simple function wrapper around displaced_step_thread_state::reset.  */

static void
displaced_step_reset (displaced_step_thread_state *displaced)
{
  displaced->reset ();
}

/* A cleanup that wraps displaced_step_reset.  We use this instead of, say,
   SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()".  */

using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
/* See infrun.h.  */
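/* For example, a 4-byte buffer { 0xde, 0xad, 0xbe, 0xef } is rendered
   as "de ad be ef".  */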
std::string
displaced_step_dump_bytes (const gdb_byte *buf, size_t len)
{
  std::string ret;

  for (size_t i = 0; i < len; i++)
    {
      if (i == 0)
        ret += string_printf ("%02x", buf[i]);
      else
        ret += string_printf (" %02x", buf[i]);
    }

  return ret;
}
/* Prepare to single-step, using displaced stepping.

   Note that we cannot use displaced stepping when we have a signal to
   deliver.  If we have a signal to deliver and an instruction to step
   over, then after the step, there will be no indication from the
   target whether the thread entered a signal handler or ignored the
   signal and stepped over the instruction successfully --- both cases
   result in a simple SIGTRAP.  In the first case we mustn't do a
   fixup, and in the second case we must --- but we can't tell which.
   Comments in the code for 'random signals' in handle_inferior_event
   explain how we handle this case instead.

   Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
   thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
   if displaced stepping this thread got queued; or
   DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
   stepped.  */

static displaced_step_prepare_status
displaced_step_prepare_throw (thread_info *tp)
{
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  displaced_step_thread_state &disp_step_thread_state
    = tp->displaced_step_state;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));

  /* Nor if the thread isn't meant to step over a breakpoint.  */
  gdb_assert (tp->control.trap_expected);

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We are about to start a displaced step for this thread.  If one is already
     in progress, something's wrong.  */
  gdb_assert (!disp_step_thread_state.in_progress ());

  if (tp->inf->displaced_step_state.unavailable)
    {
      /* The gdbarch tells us a prepare would likely return unavailable,
         so don't bother asking.  */
      displaced_debug_printf ("deferring step of %s",
                              tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);
      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  displaced_debug_printf ("displaced-stepping %s now",
                          tp->ptid.to_string ().c_str ());

  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  CORE_ADDR original_pc = regcache_read_pc (regcache);
  CORE_ADDR displaced_pc;

  displaced_step_prepare_status status
    = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);

  if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
    {
      displaced_debug_printf ("failed to prepare (%s)",
                              tp->ptid.to_string ().c_str ());

      return DISPLACED_STEP_PREPARE_STATUS_CANT;
    }
  else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
    {
      /* Not enough displaced stepping resources available, defer this
         request by placing it in the queue.  */
      displaced_debug_printf ("not enough resources available, "
                              "deferring step of %s",
                              tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);

      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  disp_step_thread_state.set (gdbarch);

  tp->inf->displaced_step_state.in_progress_count++;

  displaced_debug_printf ("prepared successfully thread=%s, "
                          "original_pc=%s, displaced_pc=%s",
                          tp->ptid.to_string ().c_str (),
                          paddress (gdbarch, original_pc),
                          paddress (gdbarch, displaced_pc));

  return DISPLACED_STEP_PREPARE_STATUS_OK;
}
/* Wrapper for displaced_step_prepare_throw that disables further
   attempts at displaced stepping if we get a memory error.  */

static displaced_step_prepare_status
displaced_step_prepare (thread_info *thread)
{
  displaced_step_prepare_status status
    = DISPLACED_STEP_PREPARE_STATUS_CANT;

  try
    {
      status = displaced_step_prepare_throw (thread);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != MEMORY_ERROR
          && ex.error != NOT_SUPPORTED_ERROR)
        throw;

      infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
                           ex.what ());

      /* Be verbose if "set displaced-stepping" is "on", silent if
         "auto".  */
      if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
        {
          warning (_("disabling displaced stepping: %s"),
                   ex.what ());
        }

      /* Disable further displaced stepping attempts.  */
      thread->inf->displaced_step_state.failed_before = 1;
    }

  return status;
}
/* If we displaced stepped an instruction successfully, adjust registers and
   memory to yield the same effect the instruction would have had if we had
   executed it at its original address, and return
   DISPLACED_STEP_FINISH_STATUS_OK.  If the instruction didn't complete,
   relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.

   If the thread wasn't displaced stepping, return
   DISPLACED_STEP_FINISH_STATUS_OK as well.  */

static displaced_step_finish_status
displaced_step_finish (thread_info *event_thread, enum gdb_signal signal)
{
  displaced_step_thread_state *displaced = &event_thread->displaced_step_state;

  /* Was this thread performing a displaced step?  */
  if (!displaced->in_progress ())
    return DISPLACED_STEP_FINISH_STATUS_OK;

  gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
  event_thread->inf->displaced_step_state.in_progress_count--;

  /* Fixup may need to read memory/registers.  Switch to the thread
     that we're fixing up.  Also, target_stopped_by_watchpoint checks
     the current thread, and displaced_step_restore performs ptid-dependent
     memory accesses using current_inferior().  */
  switch_to_thread (event_thread);

  displaced_step_reset_cleanup cleanup (displaced);

  /* Do the fixup, and release the resources acquired to do the displaced
     step.  */
  return gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
                                        event_thread, signal);
}
/* Data to be passed around while handling an event.  This data is
   discarded between events.  */

struct execution_control_state
{
  execution_control_state ()
  {
    this->reset ();
  }

  void reset ()
  {
    this->target = nullptr;
    this->ptid = null_ptid;
    this->event_thread = nullptr;
    ws = target_waitstatus ();
    stop_func_filled_in = 0;
    stop_func_start = 0;
    stop_func_end = 0;
    stop_func_name = nullptr;
    wait_some_more = 0;
    hit_singlestep_breakpoint = 0;
  }

  process_stratum_target *target;
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  struct target_waitstatus ws;
  int stop_func_filled_in;
  CORE_ADDR stop_func_start;
  CORE_ADDR stop_func_end;
  const char *stop_func_name;
  int wait_some_more;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint;
};

/* Clear ECS and set it to point at TP.  */

static void
reset_ecs (struct execution_control_state *ecs, struct thread_info *tp)
{
  ecs->reset ();
  ecs->event_thread = tp;
  ecs->ptid = tp->ptid;
}
static void keep_going_pass_signal (struct execution_control_state *ecs);
static void prepare_to_wait (struct execution_control_state *ecs);
static bool keep_going_stepped_thread (struct thread_info *tp);
static step_over_what thread_still_needs_step_over (struct thread_info *tp);
/* Are there any pending step-over requests?  If so, run all we can
   now and return true.  Otherwise, return false.  */

static bool
start_step_over (void)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* Don't start a new step-over if we already have an in-line
     step-over operation ongoing.  */
  if (step_over_info_valid_p ())
    return false;

  /* Steal the global thread step over chain.  As we try to initiate displaced
     steps, threads will be enqueued in the global chain if no buffers are
     available.  If we iterated on the global chain directly, we might iterate
     indefinitely.  */
  thread_step_over_list threads_to_step
    = std::move (global_thread_step_over_list);

  infrun_debug_printf ("stealing global queue of threads to step, length = %d",
                       thread_step_over_chain_length (threads_to_step));

  bool started = false;

  /* On scope exit (whatever the reason, return or exception), if there are
     threads left in the THREADS_TO_STEP chain, put back these threads in the
     global list.  */
  SCOPE_EXIT
    {
      if (threads_to_step.empty ())
        infrun_debug_printf ("step-over queue now empty");
      else
        {
          infrun_debug_printf ("putting back %d threads to step in global queue",
                               thread_step_over_chain_length (threads_to_step));

          global_thread_step_over_chain_enqueue_chain
            (std::move (threads_to_step));
        }
    };

  thread_step_over_list_safe_range range
    = make_thread_step_over_list_safe_range (threads_to_step);

  for (thread_info *tp : range)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;
      step_over_what step_what;
      int must_be_in_line;

      gdb_assert (!tp->stop_requested);

      if (tp->inf->displaced_step_state.unavailable)
        {
          /* The arch told us to not even try preparing another displaced step
             for this inferior.  Just leave the thread in THREADS_TO_STEP, it
             will get moved to the global chain on scope exit.  */
          continue;
        }

      if (tp->inf->thread_waiting_for_vfork_done != nullptr)
        {
          /* When we stop all threads, handling a vfork, any thread in the step
             over chain remains there.  A user could also try to continue a
             thread stopped at a breakpoint while another thread is waiting for
             a vfork-done event.  In any case, we don't want to start a step
             over right now.  */
          continue;
        }

      /* Remove thread from the THREADS_TO_STEP chain.  If anything goes wrong
         while we try to prepare the displaced step, we don't add it back to
         the global step over chain.  This is to avoid a thread staying in the
         step over chain indefinitely if something goes wrong when resuming it.
         If the error is intermittent and it still needs a step over, it will
         get enqueued again when we try to resume it normally.  */
      threads_to_step.erase (threads_to_step.iterator_to (*tp));

      step_what = thread_still_needs_step_over (tp);
      must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
                         || ((step_what & STEP_OVER_BREAKPOINT)
                             && !use_displaced_stepping (tp)));

      /* We currently stop all threads of all processes to step-over
         in-line.  If we need to start a new in-line step-over, let
         any pending displaced steps finish first.  */
      if (must_be_in_line && displaced_step_in_progress_any_thread ())
        {
          global_thread_step_over_chain_enqueue (tp);
          continue;
        }
      if (tp->control.trap_expected
          || tp->resumed ()
          || tp->executing ())
        {
          internal_error (__FILE__, __LINE__,
                          "[%s] has inconsistent state: "
                          "trap_expected=%d, resumed=%d, executing=%d\n",
                          tp->ptid.to_string ().c_str (),
                          tp->control.trap_expected,
                          tp->resumed (),
                          tp->executing ());
        }

      infrun_debug_printf ("resuming [%s] for step-over",
                           tp->ptid.to_string ().c_str ());

      /* keep_going_pass_signal skips the step-over if the breakpoint
         is no longer inserted.  In all-stop, we want to keep looking
         for a thread that needs a step-over instead of resuming TP,
         because we wouldn't be able to resume anything else until the
         target stops again.  In non-stop, the resume always resumes
         only TP, so it's OK to let the thread resume freely.  */
      if (!target_is_non_stop_p () && !step_what)
        continue;

      switch_to_thread (tp);
      reset_ecs (ecs, tp);
      keep_going_pass_signal (ecs);

      if (!ecs->wait_some_more)
        error (_("Command aborted."));

      /* If the thread's step over could not be initiated because no buffers
         were available, it was re-added to the global step over chain.  */
      if (tp->resumed ())
        {
          infrun_debug_printf ("[%s] was resumed.",
                               tp->ptid.to_string ().c_str ());
          gdb_assert (!thread_is_in_step_over_chain (tp));
        }
      else
        {
          infrun_debug_printf ("[%s] was NOT resumed.",
                               tp->ptid.to_string ().c_str ());
          gdb_assert (thread_is_in_step_over_chain (tp));
        }

      /* If we started a new in-line step-over, we're done.  */
      if (step_over_info_valid_p ())
        {
          gdb_assert (tp->control.trap_expected);
          started = true;
          break;
        }

      if (!target_is_non_stop_p ())
        {
          /* On all-stop, shouldn't have resumed unless we needed a
             step over.  */
          gdb_assert (tp->control.trap_expected
                      || tp->step_after_step_resume_breakpoint);

          /* With remote targets (at least), in all-stop, we can't
             issue any further remote commands until the program stops
             again.  */
          started = true;
          break;
        }

      /* Either the thread no longer needed a step-over, or a new
         displaced stepping sequence started.  Even in the latter
         case, continue looking.  Maybe we can also start another
         displaced step on a thread of another process.  */
    }

  return started;
}
/* Update global variables holding ptids to hold NEW_PTID if they were
   holding OLD_PTID.  */

static void
infrun_thread_ptid_changed (process_stratum_target *target,
			    ptid_t old_ptid, ptid_t new_ptid)
{
  if (inferior_ptid == old_ptid
      && current_inferior ()->process_target () == target)
    inferior_ptid = new_ptid;
}

static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  schedlock_replay,
  NULL
};
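
/* For quick reference, the modes above translate to resume sets
   roughly as follows.  This summary is derived from the logic of
   user_visible_resume_ptid and schedlock_applies below, not from a
   separate specification:

     "off"    -> resume all threads (of one or all processes,
		 depending on "set schedule-multiple")
     "on"     -> resume only the current thread
     "step"   -> resume only the current thread, but only for
		 step/next-like commands
     "replay" -> resume only the current thread while replaying a
		 recorded execution  */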
static const char *scheduler_mode = schedlock_replay;

static void
show_scheduler_mode (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Mode for locking scheduler "
		"during execution is \"%s\".\n"),
	      value);
}

static void
set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
{
  if (!target_can_lock_scheduler ())
    {
      scheduler_mode = schedlock_off;
      error (_("Target '%s' cannot support this command."),
	     target_shortname ());
    }
}

/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  */
bool sched_multi = false;

/* Try to set up for software single stepping.  Return true if
   target_resume() should use hardware single step.

   GDBARCH is the current gdbarch.  */

static bool
maybe_software_singlestep (struct gdbarch *gdbarch)
{
  bool hw_step = true;

  if (execution_direction == EXEC_FORWARD
      && gdbarch_software_single_step_p (gdbarch))
    hw_step = !insert_single_step_breakpoints (gdbarch);

  return hw_step;
}
/* See infrun.h.  */

ptid_t
user_visible_resume_ptid (int step)
{
  ptid_t resume_ptid;

  if (non_stop)
    {
      /* With non-stop mode on, threads are always handled
	 individually.  */
      resume_ptid = inferior_ptid;
    }
  else if ((scheduler_mode == schedlock_on)
	   || (scheduler_mode == schedlock_step && step))
    {
      /* User-settable 'scheduler' mode requires solo thread
	 resume.  */
      resume_ptid = inferior_ptid;
    }
  else if ((scheduler_mode == schedlock_replay)
	   && target_record_will_replay (minus_one_ptid, execution_direction))
    {
      /* User-settable 'scheduler' mode requires solo thread resume in replay
	 mode.  */
      resume_ptid = inferior_ptid;
    }
  else if (!sched_multi && target_supports_multi_process ())
    {
      /* Resume all threads of the current process (and none of other
	 processes).  */
      resume_ptid = ptid_t (inferior_ptid.pid ());
    }
  else
    {
      /* Resume all threads of all processes.  */
      resume_ptid = RESUME_ALL;
    }

  return resume_ptid;
}

/* See infrun.h.  */

process_stratum_target *
user_visible_resume_target (ptid_t resume_ptid)
{
  return (resume_ptid == minus_one_ptid && sched_multi
	  ? NULL
	  : current_inferior ()->process_target ());
}

/* Return a ptid representing the set of threads that we will resume,
   from the perspective of the target, assuming run control handling
   does not require leaving some threads stopped (e.g., stepping past
   breakpoint).  USER_STEP indicates whether we're about to start the
   target for a stepping command.  */

static ptid_t
internal_resume_ptid (int user_step)
{
  /* In non-stop, we always control threads individually.  Note that
     the target may always work in non-stop mode even with "set
     non-stop off", in which case user_visible_resume_ptid could
     return a wildcard ptid.  */
  if (target_is_non_stop_p ())
    return inferior_ptid;

  /* The rest of the function assumes non-stop==off and
     target-non-stop==off.

     If a thread is waiting for a vfork-done event, it means breakpoints are
     out for this inferior (well, program space in fact).  We don't want to
     resume any thread other than the one waiting for vfork done, otherwise
     these other threads could miss breakpoints.  So if a thread in the
     resumption set is waiting for a vfork-done event, resume only that
     thread.

     The resumption set width depends on whether schedule-multiple is on
     or off.

     Note that if the target_resume interface was more flexible, we could be
     smarter here when schedule-multiple is on.  For example, imagine 3
     inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2).  Threads
     2.1 and 3.2 are both waiting for a vfork-done event.  Then we could ask
     the target(s) to resume:

      - All threads of inferior 1
      - Thread 2.1
      - Thread 3.2

     Since we don't have that flexibility (we can only pass one ptid), just
     resume the first thread waiting for a vfork-done event we find (e.g.
     thread 2.1).  */
  if (sched_multi)
    {
      for (inferior *inf : all_non_exited_inferiors ())
	if (inf->thread_waiting_for_vfork_done != nullptr)
	  return inf->thread_waiting_for_vfork_done->ptid;
    }
  else if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
    return current_inferior ()->thread_waiting_for_vfork_done->ptid;

  return user_visible_resume_ptid (user_step);
}
/* Wrapper for target_resume, that handles infrun-specific
   bookkeeping.  */

static void
do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
{
  struct thread_info *tp = inferior_thread ();

  gdb_assert (!tp->stop_requested);

  /* Install inferior's terminal modes.  */
  target_terminal::inferior ();

  /* Avoid confusing the next resume, if the next stop/resume
     happens to apply to another thread.  */
  tp->set_stop_signal (GDB_SIGNAL_0);

  /* Advise target which signals may be handled silently.

     If we have removed breakpoints because we are stepping over one
     in-line (in any thread), we need to receive all signals to avoid
     accidentally skipping a breakpoint during execution of a signal
     handler.

     Likewise if we're displaced stepping, otherwise a trap for a
     breakpoint in a signal handler might be confused with the
     displaced step finishing.  We don't make displaced_step_finish
     distinguish the cases instead, because:

      - a backtrace while stopped in the signal handler would show the
	scratch pad as frame older than the signal handler, instead of
	the real mainline code.

      - when the thread is later resumed, the signal handler would
	return to the scratch pad area, which would no longer be
	valid.  */
  if (step_over_info_valid_p ()
      || displaced_step_in_progress (tp->inf))
    target_pass_signals ({});
  else
    target_pass_signals (signal_pass);

  infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
		       resume_ptid.to_string ().c_str (),
		       step, gdb_signal_to_symbol_string (sig));

  target_resume (resume_ptid, step, sig);
}
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  Note: don't call this directly; instead
   call 'resume', which handles exceptions.  */

static void
resume_1 (enum gdb_signal sig)
{
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();
  struct thread_info *tp = inferior_thread ();
  const address_space *aspace = regcache->aspace ();
  ptid_t resume_ptid;
  /* This represents the user's step vs continue request.  When
     deciding whether "set scheduler-locking step" applies, it's the
     user's intention that counts.  */
  const int user_step = tp->control.stepping_command;
  /* This represents what we'll actually request the target to do.
     This can decay from a step to a continue, if e.g., we need to
     implement single-stepping with breakpoints (software
     single-step).  */
  bool step;

  gdb_assert (!tp->stop_requested);
  gdb_assert (!thread_is_in_step_over_chain (tp));

  if (tp->has_pending_waitstatus ())
    {
      infrun_debug_printf
	("thread %s has pending wait "
	 "status %s (currently_stepping=%d).",
	 tp->ptid.to_string ().c_str (),
	 tp->pending_waitstatus ().to_string ().c_str (),
	 currently_stepping (tp));

      tp->inf->process_target ()->threads_executing = true;
      tp->set_resumed (true);

      /* FIXME: What should we do if we are supposed to resume this
	 thread with a signal?  Maybe we should maintain a queue of
	 pending signals to deliver.  */
      if (sig != GDB_SIGNAL_0)
	{
	  warning (_("Couldn't deliver signal %s to %s."),
		   gdb_signal_to_name (sig),
		   tp->ptid.to_string ().c_str ());
	}

      tp->set_stop_signal (GDB_SIGNAL_0);

      if (target_can_async_p ())
	{
	  target_async (1);
	  /* Tell the event loop we have an event to process.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);
	}
      return;
    }

  tp->stepped_breakpoint = 0;

  /* Depends on stepped_breakpoint.  */
  step = currently_stepping (tp);

  if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
    {
      /* Don't try to single-step a vfork parent that is waiting for
	 the child to get out of the shared memory region (by exec'ing
	 or exiting).  This is particularly important on software
	 single-step archs, as the child process would trip on the
	 software single step breakpoint inserted for the parent
	 process.  Since the parent will not actually execute any
	 instruction until the child is out of the shared region (such
	 are vfork's semantics), it is safe to simply continue it.
	 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
	 the parent, and tell it to `keep_going', which automatically
	 sets it stepping again.  */
      infrun_debug_printf ("resume: clear step");
      step = false;
    }

  CORE_ADDR pc = regcache_read_pc (regcache);

  infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
		       "current thread [%s] at %s",
		       step, gdb_signal_to_symbol_string (sig),
		       tp->control.trap_expected,
		       inferior_ptid.to_string ().c_str (),
		       paddress (gdbarch, pc));

  /* Normally, by the time we reach `resume', the breakpoints are either
     removed or inserted, as appropriate.  The exception is if we're sitting
     at a permanent breakpoint; we need to step over it, but permanent
     breakpoints can't be removed.  So we have to test for it here.  */
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
    {
      if (sig != GDB_SIGNAL_0)
	{
	  /* We have a signal to pass to the inferior.  The resume
	     may, or may not take us to the signal handler.  If this
	     is a step, we'll need to stop in the signal handler, if
	     there's one (if the target supports stepping into
	     handlers), or in the next mainline instruction, if
	     there's no handler.  If this is a continue, we need to be
	     sure to run the handler with all breakpoints inserted.
	     In all cases, set a breakpoint at the current address
	     (where the handler returns to), and once that breakpoint
	     is hit, resume skipping the permanent breakpoint.  If
	     that breakpoint isn't hit, then we've stepped into the
	     signal handler (or hit some other event).  We'll delete
	     the step-resume breakpoint then.  */
	  infrun_debug_printf ("resume: skipping permanent breakpoint, "
			       "deliver signal first");

	  clear_step_over_info ();
	  tp->control.trap_expected = 0;

	  if (tp->control.step_resume_breakpoint == NULL)
	    {
	      /* Set a "high-priority" step-resume, as we don't want
		 user breakpoints at PC to trigger (again) when this
		 hits.  */
	      insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	      gdb_assert (tp->control.step_resume_breakpoint->loc->permanent);

	      tp->step_after_step_resume_breakpoint = step;
	    }

	  insert_breakpoints ();
	}
      else
	{
	  /* There's no signal to pass, we can go ahead and skip the
	     permanent breakpoint manually.  */
	  infrun_debug_printf ("skipping permanent breakpoint");
	  gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
	  /* Update pc to reflect the new address from which we will
	     execute instructions.  */
	  pc = regcache_read_pc (regcache);

	  if (step)
	    {
	      /* We've already advanced the PC, so the stepping part
		 is done.  Now we need to arrange for a trap to be
		 reported to handle_inferior_event.  Set a breakpoint
		 at the current PC, and run to it.  Don't update
		 prev_pc, because if we end in
		 switch_back_to_stepped_thread, we want the "expected
		 thread advanced also" branch to be taken.  IOW, we
		 don't want this thread to step further from PC
		 (overstep).  */
	      gdb_assert (!step_over_info_valid_p ());
	      insert_single_step_breakpoint (gdbarch, aspace, pc);
	      insert_breakpoints ();

	      resume_ptid = internal_resume_ptid (user_step);
	      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
	      tp->set_resumed (true);
	      return;
	    }
	}
    }

  /* If we have a breakpoint to step over, make sure to do a single
     step only.  Same if we have software watchpoints.  */
  if (tp->control.trap_expected || bpstat_should_step ())
    tp->control.may_range_step = 0;

  /* If displaced stepping is enabled, step over breakpoints by executing a
     copy of the instruction at a different address.

     We can't use displaced stepping when we have a signal to deliver;
     the comments for displaced_step_prepare explain why.  The
     comments in the handle_inferior event for dealing with 'random
     signals' explain what we do instead.

     We can't use displaced stepping while we are waiting for a vfork_done
     event either; displaced stepping would break the vfork child in the
     same way a software single-step breakpoint would.  */
  if (tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ()
      && sig == GDB_SIGNAL_0
      && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
    {
      displaced_step_prepare_status prepare_status
	= displaced_step_prepare (tp);

      if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
	{
	  infrun_debug_printf ("Got placed in step-over queue");

	  tp->control.trap_expected = 0;
	  return;
	}
      else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
	{
	  /* Fallback to stepping over the breakpoint in-line.  */

	  if (target_is_non_stop_p ())
	    stop_all_threads ("displaced stepping falling back on inline stepping");

	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), 0, tp->global_num);

	  step = maybe_software_singlestep (gdbarch);

	  insert_breakpoints ();
	}
      else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
	{
	  /* Update pc to reflect the new address from which we will
	     execute instructions due to displaced stepping.  */
	  pc = regcache_read_pc (get_thread_regcache (tp));

	  step = gdbarch_displaced_step_hw_singlestep (gdbarch);
	}
      else
	gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
				"value.");
    }
  /* Do we need to do it the hard way, w/temp breakpoints?  */
  else if (step)
    step = maybe_software_singlestep (gdbarch);

  /* Currently, our software single-step implementation leads to different
     results than hardware single-stepping in one situation: when stepping
     into delivering a signal which has an associated signal handler,
     hardware single-step will stop at the first instruction of the handler,
     while software single-step will simply skip execution of the handler.

     For now, this difference in behavior is accepted since there is no
     easy way to actually implement single-stepping into a signal handler
     without kernel support.

     However, there is one scenario where this difference leads to follow-on
     problems: if we're stepping off a breakpoint by removing all breakpoints
     and then single-stepping.  In this case, the software single-step
     behavior means that even if there is a *breakpoint* in the signal
     handler, GDB still would not stop.

     Fortunately, we can at least fix this particular issue.  We detect
     here the case where we are about to deliver a signal while software
     single-stepping with breakpoints removed.  In this situation, we
     revert the decisions to remove all breakpoints and insert single-
     step breakpoints, and instead we install a step-resume breakpoint
     at the current address, deliver the signal without stepping, and
     once we arrive back at the step-resume breakpoint, actually step
     over the breakpoint we originally wanted to step over.  */
  if (thread_has_single_step_breakpoints_set (tp)
      && sig != GDB_SIGNAL_0
      && step_over_info_valid_p ())
    {
      /* If we have nested signals or a pending signal is delivered
	 immediately after a handler returns, we might already have
	 a step-resume breakpoint set on the earlier handler.  We cannot
	 set another step-resume breakpoint; just continue on until the
	 original breakpoint is hit.  */
      if (tp->control.step_resume_breakpoint == NULL)
	{
	  insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
	  tp->step_after_step_resume_breakpoint = 1;
	}

      delete_single_step_breakpoints (tp);

      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_breakpoints ();
    }

  /* If STEP is set, it's a request to use hardware stepping
     facilities.  But in that case, we should never
     use a single-step breakpoint.  */
  gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));

  /* Decide the set of threads to ask the target to resume.  */
  if (tp->control.trap_expected)
    {
      /* We're allowing a thread to run past a breakpoint it has
	 hit, either by single-stepping the thread with the breakpoint
	 removed, or by displaced stepping, with the breakpoint inserted.
	 In the former case, we need to single-step only this thread,
	 and keep others stopped, as they can miss this breakpoint if
	 allowed to run.  That's not really a problem for displaced
	 stepping, but, we still keep other threads stopped, in case
	 another thread is also stopped for a breakpoint waiting for
	 its turn in the displaced stepping queue.  */
      resume_ptid = inferior_ptid;
    }
  else
    resume_ptid = internal_resume_ptid (user_step);

  if (execution_direction != EXEC_REVERSE
      && step && breakpoint_inserted_here_p (aspace, pc))
    {
      /* There are two cases where we currently need to step a
	 breakpoint instruction when we have a signal to deliver:

	 - See handle_signal_stop where we handle random signals that
	   could take us out of the stepping range.  Normally, in
	   that case we end up continuing (instead of stepping) over the
	   signal handler with a breakpoint at PC, but there are cases
	   where we should _always_ single-step, even if we have a
	   step-resume breakpoint, like when a software watchpoint is
	   set.  Assuming single-stepping and delivering a signal at the
	   same time would take us to the signal handler, then we could
	   have removed the breakpoint at PC to step over it.  However,
	   some hardware step targets (like e.g., Mac OS) can't step
	   into signal handlers, and for those, we need to leave the
	   breakpoint at PC inserted, as otherwise if the handler
	   recurses and executes PC again, it'll miss the breakpoint.
	   So we leave the breakpoint inserted anyway, but we need to
	   record that we tried to step a breakpoint instruction, so
	   that adjust_pc_after_break doesn't end up confused.

	 - In non-stop if we insert a breakpoint (e.g., a step-resume)
	   in one thread after another thread that was stepping had been
	   momentarily paused for a step-over.  When we re-resume the
	   stepping thread, it may be resumed from that address with a
	   breakpoint that hasn't trapped yet.  Seen with
	   gdb.threads/non-stop-fair-events.exp, on targets that don't
	   do displaced stepping.  */
      infrun_debug_printf ("resume: [%s] stepped breakpoint",
			   tp->ptid.to_string ().c_str ());

      tp->stepped_breakpoint = 1;

      /* Most targets can step a breakpoint instruction, thus
	 executing it normally.  But if this one cannot, just
	 continue and we will hit it anyway.  */
      if (gdbarch_cannot_step_breakpoint (gdbarch))
	step = false;
    }

  if (debug_displaced
      && tp->control.trap_expected
      && use_displaced_stepping (tp)
      && !step_over_info_valid_p ())
    {
      struct regcache *resume_regcache = get_thread_regcache (tp);
      struct gdbarch *resume_gdbarch = resume_regcache->arch ();
      CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
      gdb_byte buf[4];

      read_memory (actual_pc, buf, sizeof (buf));
      displaced_debug_printf ("run %s: %s",
			      paddress (resume_gdbarch, actual_pc),
			      displaced_step_dump_bytes
				(buf, sizeof (buf)).c_str ());
    }

  if (tp->control.may_range_step)
    {
      /* If we're resuming a thread with the PC out of the step
	 range, then we're doing some nested/finer run control
	 operation, like stepping the thread out of the dynamic
	 linker or the displaced stepping scratch pad.  We
	 shouldn't have allowed a range step then.  */
      gdb_assert (pc_in_thread_step_range (pc, tp));
    }

  do_target_resume (resume_ptid, step, sig);
  tp->set_resumed (true);
}
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  This is a wrapper around 'resume_1' that
   rolls back state on error.  */

static void
resume (gdb_signal sig)
{
  try
    {
      resume_1 (sig);
    }
  catch (const gdb_exception &ex)
    {
      /* If resuming is being aborted for any reason, delete any
	 single-step breakpoint resume_1 may have created, to avoid
	 confusing the following resumption, and to avoid leaving
	 single-step breakpoints perturbing other threads, in case
	 we're running in non-stop mode.  */
      if (inferior_ptid != null_ptid)
	delete_single_step_breakpoints (inferior_thread ());
      throw;
    }
}
/* Proceeding.  */

/* See infrun.h.  */

/* Counter that tracks number of user visible stops.  This can be used
   to tell whether a command has proceeded the inferior past the
   current location.  This allows e.g., inferior function calls in
   breakpoint commands to not interrupt the command list.  When the
   call finishes successfully, the inferior is standing at the same
   breakpoint as if nothing happened (and so we don't call
   normal_stop).  */
static ULONGEST current_stop_id;

/* See infrun.h.  */

ULONGEST
get_stop_id (void)
{
  return current_stop_id;
}
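
/* A sketch of how stop ids are meant to be used, with a hypothetical
   caller.  This just restates the CURRENT_STOP_ID comment above as
   code; it is an illustration, not code from this file:

     ULONGEST start_id = get_stop_id ();
     ... proceed, and wait for the inferior to stop ...
     if (get_stop_id () != start_id)
       -> a user-visible stop was reported in between, i.e. the
	  command proceeded the inferior past the original location.  */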
/* Called when we report a user visible stop.  */

static void
new_stop_id (void)
{
  current_stop_id++;
}
/* Clear out all variables saying what to do when the inferior is
   continued.  First do this, then set the ones you want, then call
   `proceed'.  */

static void
clear_proceed_status_thread (struct thread_info *tp)
{
  infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());

  /* If we're starting a new sequence, then the previous finished
     single-step is no longer relevant.  */
  if (tp->has_pending_waitstatus ())
    {
      if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
	{
	  infrun_debug_printf ("pending event of %s was a finished step. "
			       "Discarding.",
			       tp->ptid.to_string ().c_str ());

	  tp->clear_pending_waitstatus ();
	  tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
	}
      else
	{
	  infrun_debug_printf
	    ("thread %s has pending wait status %s (currently_stepping=%d).",
	     tp->ptid.to_string ().c_str (),
	     tp->pending_waitstatus ().to_string ().c_str (),
	     currently_stepping (tp));
	}
    }

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->stop_signal ()))
    tp->set_stop_signal (GDB_SIGNAL_0);

  tp->release_thread_fsm ();

  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->control.step_start_function = NULL;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  tp->control.stepping_command = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}

void
clear_proceed_status (int step)
{
  /* With scheduler-locking replay, stop replaying other threads if we're
     not replaying the user-visible resume ptid.

     This is a convenience feature to not require the user to explicitly
     stop replaying the other threads.  We're assuming that the user's
     intent is to resume tracing the recorded process.  */
  if (!non_stop && scheduler_mode == schedlock_replay
      && target_record_is_replaying (minus_one_ptid)
      && !target_record_will_replay (user_visible_resume_ptid (step),
				     execution_direction))
    target_record_stop_replaying ();

  if (!non_stop && inferior_ptid != null_ptid)
    {
      ptid_t resume_ptid = user_visible_resume_ptid (step);
      process_stratum_target *resume_target
	= user_visible_resume_target (resume_ptid);

      /* In all-stop mode, delete the per-thread status of all threads
	 we're about to resume, implicitly and explicitly.  */
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	clear_proceed_status_thread (tp);
    }

  if (inferior_ptid != null_ptid)
    {
      struct inferior *inferior;

      if (non_stop)
	{
	  /* If in non-stop mode, only delete the per-thread status of
	     the current thread.  */
	  clear_proceed_status_thread (inferior_thread ());
	}

      inferior = current_inferior ();
      inferior->control.stop_soon = NO_STOP_QUIETLY;
    }

  gdb::observers::about_to_proceed.notify ();
}
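
/* To illustrate the protocol described above clear_proceed_status_thread
   ("first do this, then set the ones you want, then call `proceed'"),
   a typical execution command does, in sketch form:

     clear_proceed_status (0);
     ... set the per-thread control fields it wants, e.g. a step
	 range ...
     proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);

   where the -1 address means "resume where stopped"; see proceed
   below.  This is a sketch of the calling convention, not a verbatim
   caller.  */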
/* Returns true if TP is still stopped at a breakpoint that needs
   stepping-over in order to make progress.  If the breakpoint is gone
   meanwhile, we can skip the whole step-over dance.  */

static bool
thread_still_needs_step_over_bp (struct thread_info *tp)
{
  if (tp->stepping_over_breakpoint)
    {
      struct regcache *regcache = get_thread_regcache (tp);

      if (breakpoint_here_p (regcache->aspace (),
			     regcache_read_pc (regcache))
	  == ordinary_breakpoint_here)
	return true;

      tp->stepping_over_breakpoint = 0;
    }

  return false;
}

/* Check whether thread TP still needs to start a step-over in order
   to make progress when resumed.  Returns a bitwise OR of enum
   step_over_what bits, indicating what needs to be stepped over.  */

static step_over_what
thread_still_needs_step_over (struct thread_info *tp)
{
  step_over_what what = 0;

  if (thread_still_needs_step_over_bp (tp))
    what |= STEP_OVER_BREAKPOINT;

  if (tp->stepping_over_watchpoint
      && !target_have_steppable_watchpoint ())
    what |= STEP_OVER_WATCHPOINT;

  return what;
}
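
/* The value returned above is a bit mask, so callers test individual
   bits, as in this sketch (compare the use in start_step_over):

     step_over_what what = thread_still_needs_step_over (tp);
     if (what & STEP_OVER_BREAKPOINT)
       ... must step past a breakpoint at PC ...
     if (what & STEP_OVER_WATCHPOINT)
       ... must step past a watchpoint ...  */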
/* Returns true if scheduler locking applies to TP, i.e., if only TP
   should be resumed.  Whether we're about to do a step/next-like
   command is determined from TP's stepping_command flag, which is what
   "set scheduler-locking step" keys off of.  */

static bool
schedlock_applies (struct thread_info *tp)
{
  return (scheduler_mode == schedlock_on
	  || (scheduler_mode == schedlock_step
	      && tp->control.stepping_command)
	  || (scheduler_mode == schedlock_replay
	      && target_record_will_replay (minus_one_ptid,
					    execution_direction)));
}
/* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
   stacks that have threads executing and don't have threads with
   pending events.  */

static void
maybe_set_commit_resumed_all_targets ()
{
  scoped_restore_current_thread restore_thread;

  for (inferior *inf : all_non_exited_inferiors ())
    {
      process_stratum_target *proc_target = inf->process_target ();

      if (proc_target->commit_resumed_state)
	{
	  /* We already set this in a previous iteration, via another
	     inferior sharing the process_stratum target.  */
	  continue;
	}

      /* If the target has no resumed threads, it would be useless to
	 ask it to commit the resumed threads.  */
      if (!proc_target->threads_executing)
	{
	  infrun_debug_printf ("not requesting commit-resumed for target "
			       "%s, no resumed threads",
			       proc_target->shortname ());
	  continue;
	}

      /* As an optimization, if a thread from this target has some
	 status to report, handle it before requiring the target to
	 commit its resumed threads: handling the status might lead to
	 resuming more threads.  */
      if (proc_target->has_resumed_with_pending_wait_status ())
	{
	  infrun_debug_printf ("not requesting commit-resumed for target %s, a"
			       " thread has a pending waitstatus",
			       proc_target->shortname ());
	  continue;
	}

      switch_to_inferior_no_thread (inf);

      if (target_has_pending_events ())
	{
	  infrun_debug_printf ("not requesting commit-resumed for target %s, "
			       "target has pending events",
			       proc_target->shortname ());
	  continue;
	}

      infrun_debug_printf ("enabling commit-resumed for target %s",
			   proc_target->shortname ());

      proc_target->commit_resumed_state = true;
    }
}

/* See infrun.h.  */

void
maybe_call_commit_resumed_all_targets ()
{
  scoped_restore_current_thread restore_thread;

  for (inferior *inf : all_non_exited_inferiors ())
    {
      process_stratum_target *proc_target = inf->process_target ();

      if (!proc_target->commit_resumed_state)
	continue;

      switch_to_inferior_no_thread (inf);

      infrun_debug_printf ("calling commit_resumed for target %s",
			   proc_target->shortname ());

      target_commit_resumed ();
    }
}
/* To track nesting of scoped_disable_commit_resumed objects, ensuring
   that only the outermost one attempts to re-enable
   commit-resumed.  */
static bool enable_commit_resumed = true;

/* See infrun.h.  */

scoped_disable_commit_resumed::scoped_disable_commit_resumed
  (const char *reason)
  : m_reason (reason),
    m_prev_enable_commit_resumed (enable_commit_resumed)
{
  infrun_debug_printf ("reason=%s", m_reason);

  enable_commit_resumed = false;

  for (inferior *inf : all_non_exited_inferiors ())
    {
      process_stratum_target *proc_target = inf->process_target ();

      if (m_prev_enable_commit_resumed)
	{
	  /* This is the outermost instance: force all
	     COMMIT_RESUMED_STATE to false.  */
	  proc_target->commit_resumed_state = false;
	}
      else
	{
	  /* This is not the outermost instance, we expect
	     COMMIT_RESUMED_STATE to have been cleared by the
	     outermost instance.  */
	  gdb_assert (!proc_target->commit_resumed_state);
	}
    }
}

/* See infrun.h.  */

void
scoped_disable_commit_resumed::reset ()
{
  if (m_reset)
    return;

  m_reset = true;

  infrun_debug_printf ("reason=%s", m_reason);

  gdb_assert (!enable_commit_resumed);

  enable_commit_resumed = m_prev_enable_commit_resumed;

  if (m_prev_enable_commit_resumed)
    {
      /* This is the outermost instance, re-enable
	 COMMIT_RESUMED_STATE on the targets where it's possible.  */
      maybe_set_commit_resumed_all_targets ();
    }
  else
    {
      /* This is not the outermost instance, we expect
	 COMMIT_RESUMED_STATE to still be false.  */
      for (inferior *inf : all_non_exited_inferiors ())
	{
	  process_stratum_target *proc_target = inf->process_target ();
	  gdb_assert (!proc_target->commit_resumed_state);
	}
    }
}

/* See infrun.h.  */

scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
{
  reset ();
}

/* See infrun.h.  */

void
scoped_disable_commit_resumed::reset_and_commit ()
{
  reset ();
  maybe_call_commit_resumed_all_targets ();
}
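
/* A sketch of the intended usage pattern for this RAII class,
   mirroring what proceed does below:

     {
       scoped_disable_commit_resumed disable_commit_resumed ("reason");
       ... resume one or more threads ...
       disable_commit_resumed.reset_and_commit ();
     }

   If an exception escapes before reset_and_commit is reached, the
   destructor calls reset, which restores the previous state without
   committing.  */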
/* See infrun.h.  */

scoped_enable_commit_resumed::scoped_enable_commit_resumed
  (const char *reason)
  : m_reason (reason),
    m_prev_enable_commit_resumed (enable_commit_resumed)
{
  infrun_debug_printf ("reason=%s", m_reason);

  if (!enable_commit_resumed)
    {
      enable_commit_resumed = true;

      /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
	 possible.  */
      maybe_set_commit_resumed_all_targets ();

      maybe_call_commit_resumed_all_targets ();
    }
}

/* See infrun.h.  */

scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
{
  infrun_debug_printf ("reason=%s", m_reason);

  gdb_assert (enable_commit_resumed);

  enable_commit_resumed = m_prev_enable_commit_resumed;

  if (!enable_commit_resumed)
    {
      /* Force all COMMIT_RESUMED_STATE back to false.  */
      for (inferior *inf : all_non_exited_inferiors ())
	{
	  process_stratum_target *proc_target = inf->process_target ();
	  proc_target->commit_resumed_state = false;
	}
    }
}
/* Check that all the targets we're about to resume are in non-stop
   mode.  Ideally, we'd only care whether all targets support
   target-async, but we're not there yet.  E.g., stop_all_threads
   doesn't know how to handle all-stop targets.  Also, the remote
   protocol in all-stop mode is synchronous, irrespective of
   target-async, which means that things like a breakpoint re-set
   triggered by one target would try to read memory from all targets
   and fail.  */

static void
check_multi_target_resumption (process_stratum_target *resume_target)
{
  if (!non_stop && resume_target == nullptr)
    {
      scoped_restore_current_thread restore_thread;

      /* This is used to track whether we're resuming more than one
	 target.  */
      process_stratum_target *first_connection = nullptr;

      /* The first inferior we see with a target that does not work in
	 always-non-stop mode.  */
      inferior *first_not_non_stop = nullptr;

      for (inferior *inf : all_non_exited_inferiors ())
	{
	  switch_to_inferior_no_thread (inf);

	  if (!target_has_execution ())
	    continue;

	  process_stratum_target *proc_target
	    = current_inferior ()->process_target ();

	  if (!target_is_non_stop_p ())
	    first_not_non_stop = inf;

	  if (first_connection == nullptr)
	    first_connection = proc_target;
	  else if (first_connection != proc_target
		   && first_not_non_stop != nullptr)
	    {
	      switch_to_inferior_no_thread (first_not_non_stop);

	      proc_target = current_inferior ()->process_target ();

	      error (_("Connection %d (%s) does not support "
		       "multi-target resumption."),
		     proc_target->connection_number,
		     make_target_connection_string (proc_target).c_str ());
	    }
	}
    }
}
/* Basic routine for continuing the program in various fashions.

   ADDR is the address to resume at, or -1 for resume where stopped.
   SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
   or GDB_SIGNAL_DEFAULT for act according to how it stopped.

   You should call clear_proceed_status before calling proceed.  */

void
proceed (CORE_ADDR addr, enum gdb_signal siggnal)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  /* If we're stopped at a fork/vfork, follow the branch set by the
     "set follow-fork-mode" command; otherwise, we'll just proceed
     resuming the current thread.  */
  if (!follow_fork ())
    {
      /* The target for some reason decided not to resume.  */
      normal_stop ();
      if (target_can_async_p ())
	inferior_event_handler (INF_EXEC_COMPLETE);
      return;
    }

  /* We'll update this if & when we switch to a new thread.  */
  previous_inferior_ptid = inferior_ptid;

  regcache = get_current_regcache ();
  gdbarch = regcache->arch ();
  const address_space *aspace = regcache->aspace ();

  pc = regcache_read_pc_protected (regcache);

  thread_info *cur_thr = inferior_thread ();

  /* Fill in with reasonable starting values.  */
  init_thread_stepping_state (cur_thr);

  gdb_assert (!thread_is_in_step_over_chain (cur_thr));

  ptid_t resume_ptid
    = user_visible_resume_ptid (cur_thr->control.stepping_command);
  process_stratum_target *resume_target
    = user_visible_resume_target (resume_ptid);

  check_multi_target_resumption (resume_target);

  if (addr == (CORE_ADDR) -1)
    {
      if (cur_thr->stop_pc_p ()
	  && pc == cur_thr->stop_pc ()
	  && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
	  && execution_direction != EXEC_REVERSE)
	/* There is a breakpoint at the address we will resume at,
	   step one instruction before inserting breakpoints so that
	   we do not stop right away (and report a second hit at this
	   breakpoint).

	   Note, we don't do this in reverse, because we won't
	   actually be executing the breakpoint insn anyway.
	   We'll be (un-)executing the previous instruction.  */
	cur_thr->stepping_over_breakpoint = 1;
      else if (gdbarch_single_step_through_delay_p (gdbarch)
	       && gdbarch_single_step_through_delay (gdbarch,
						     get_current_frame ()))
	/* We stepped onto an instruction that needs to be stepped
	   again before re-inserting the breakpoint, do so.  */
	cur_thr->stepping_over_breakpoint = 1;
    }
  else
    {
      regcache_write_pc (regcache, addr);
    }

  if (siggnal != GDB_SIGNAL_DEFAULT)
    cur_thr->set_stop_signal (siggnal);

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  */
  scoped_finish_thread_state finish_state (resume_target, resume_ptid);

  /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
     threads (e.g., we might need to set threads stepping over
     breakpoints first), from the user/frontend's point of view, all
     threads in RESUME_PTID are now running.  Unless we're calling an
     inferior function, as in that case we pretend the inferior
     doesn't run at all.  */
  if (!cur_thr->control.in_infcall)
    set_running (resume_target, resume_ptid, true);

  infrun_debug_printf ("addr=%s, signal=%s", paddress (gdbarch, addr),
		       gdb_signal_to_symbol_string (siggnal));

  annotate_starting ();

  /* Make sure that output from GDB appears before output from the
     inferior.  */
  gdb_flush (gdb_stdout);

  /* Since we've marked the inferior running, give it the terminal.  A
     QUIT/Ctrl-C from here on is forwarded to the target (which can
     still detect attempts to unblock a stuck connection with repeated
     Ctrl-C from within target_pass_ctrlc).  */
  target_terminal::inferior ();

  /* In a multi-threaded task we may select another thread and
     then continue or step.

     But if a thread that we're resuming had stopped at a breakpoint,
     it will immediately cause another breakpoint stop without any
     execution (i.e. it will report a breakpoint hit incorrectly).  So
     we must step over it first.

     Look for threads other than the current (CUR_THR) that reported a
     breakpoint hit and haven't been resumed since.  */

  /* If scheduler locking applies, we can avoid iterating over all
     threads.  */
  if (!non_stop && !schedlock_applies (cur_thr))
    {
      for (thread_info *tp : all_non_exited_threads (resume_target,
						     resume_ptid))
	{
	  switch_to_thread_no_regs (tp);

	  /* Ignore the current thread here.  It's handled
	     afterwards.  */
	  if (tp == cur_thr)
	    continue;

	  if (!thread_still_needs_step_over (tp))
	    continue;

	  gdb_assert (!thread_is_in_step_over_chain (tp));

	  infrun_debug_printf ("need to step-over [%s] first",
			       tp->ptid.to_string ().c_str ());

	  global_thread_step_over_chain_enqueue (tp);
	}

      switch_to_thread (cur_thr);
    }

  /* Enqueue the current thread last, so that we move all other
     threads over their breakpoints first.  */
  if (cur_thr->stepping_over_breakpoint)
    global_thread_step_over_chain_enqueue (cur_thr);

  /* If the thread isn't started, we'll still need to set its prev_pc,
     so that switch_back_to_stepped_thread knows the thread hasn't
     advanced.  Must do this before resuming any thread, as in
     all-stop/remote, once we resume we can't send any other packet
     until the target stops again.  */
  cur_thr->prev_pc = regcache_read_pc_protected (regcache);

  {
    scoped_disable_commit_resumed disable_commit_resumed ("proceeding");

    bool step_over_started = start_step_over ();

    if (step_over_info_valid_p ())
      {
	/* Either this thread started a new in-line step over, or some
	   other thread was already doing one.  In either case, don't
	   resume anything else until the step-over is finished.  */
      }
    else if (step_over_started && !target_is_non_stop_p ())
      {
	/* A new displaced stepping sequence was started.  In all-stop,
	   we can't talk to the target anymore until it next stops.  */
      }
    else if (!non_stop && target_is_non_stop_p ())
      {
	INFRUN_SCOPED_DEBUG_START_END
	  ("resuming threads, all-stop-on-top-of-non-stop");

	/* In all-stop, but the target is always in non-stop mode.
	   Start all other threads that are implicitly resumed too.  */
	for (thread_info *tp : all_non_exited_threads (resume_target,
						       resume_ptid))
	  {
	    switch_to_thread_no_regs (tp);

	    if (!tp->inf->has_execution ())
	      {
		infrun_debug_printf ("[%s] target has no execution",
				     tp->ptid.to_string ().c_str ());
		continue;
	      }

	    if (tp->resumed ())
	      {
		infrun_debug_printf ("[%s] resumed",
				     tp->ptid.to_string ().c_str ());
		gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
		continue;
	      }

	    if (thread_is_in_step_over_chain (tp))
	      {
		infrun_debug_printf ("[%s] needs step-over",
				     tp->ptid.to_string ().c_str ());
		continue;
	      }

	    /* If a thread of that inferior is waiting for a vfork-done
	       (for a detached vfork child to exec or exit), breakpoints are
	       removed.  We must not resume any thread of that inferior, other
	       than the one waiting for the vfork-done.  */
	    if (tp->inf->thread_waiting_for_vfork_done != nullptr
		&& tp != tp->inf->thread_waiting_for_vfork_done)
	      {
		infrun_debug_printf ("[%s] another thread of this inferior is "
				     "waiting for vfork-done",
				     tp->ptid.to_string ().c_str ());
		continue;
	      }

	    infrun_debug_printf ("resuming %s",
				 tp->ptid.to_string ().c_str ());

	    reset_ecs (ecs, tp);
	    switch_to_thread (tp);
	    keep_going_pass_signal (ecs);
	    if (!ecs->wait_some_more)
	      error (_("Command aborted."));
	  }
      }
    else if (!cur_thr->resumed ()
	     && !thread_is_in_step_over_chain (cur_thr)
	     /* In non-stop, forbid resuming a thread if some other thread of
		that inferior is waiting for a vfork-done event (this means
		breakpoints are out for this inferior).  */
	     && !(non_stop
		  && cur_thr->inf->thread_waiting_for_vfork_done != nullptr))
      {
	/* The thread wasn't started, and isn't queued, run it now.  */
	reset_ecs (ecs, cur_thr);
	switch_to_thread (cur_thr);
	keep_going_pass_signal (ecs);
	if (!ecs->wait_some_more)
	  error (_("Command aborted."));
      }

    disable_commit_resumed.reset_and_commit ();
  }

  finish_state.release ();

  /* If we've switched threads above, switch back to the previously
     current thread.  We don't want the user to see a different
     selected thread.  */
  switch_to_thread (cur_thr);

  /* Tell the event loop to wait for it to stop.  If the target
     supports asynchronous execution, it'll do this from within
     target_resume.  */
  if (!target_can_async_p ())
    mark_async_event_handler (infrun_async_inferior_event_token);
}
/* Start remote-debugging of a machine over a serial link.  */

void
start_remote (int from_tty)
{
  inferior *inf = current_inferior ();

  inf->control.stop_soon = STOP_QUIETLY_REMOTE;

  /* Always go on waiting for the target, regardless of the mode.  */
  /* FIXME: cagney/1999-09-23: At present it isn't possible to
     indicate to wait_for_inferior that a target should timeout if
     nothing is returned (instead of just blocking).  Because of this,
     targets expecting an immediate response need to, internally, set
     things up so that the target_wait() is forced to eventually
     timeout.  */
  /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
     differentiate to its caller what the state of the target is after
     the initial open has been performed.  Here we're assuming that
     the target has stopped.  It should be possible to eventually have
     target_open() return to the caller an indication that the target
     is currently running and GDB state should be set to the same as
     for an async run.  */
  wait_for_inferior (inf);

  /* Now that the inferior has stopped, do any bookkeeping like
     loading shared libraries.  We want to do this before normal_stop,
     so that the displayed frame is up to date.  */
  post_create_inferior (from_tty);

  normal_stop ();
}

/* Initialize static vars when a new inferior begins.  */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior.  */

  breakpoint_init_inferior (inf_starting);

  clear_proceed_status (0);

  nullify_last_target_wait_ptid ();

  previous_inferior_ptid = inferior_ptid;
}
static void handle_inferior_event (struct execution_control_state *ecs);

static void handle_step_into_function (struct gdbarch *gdbarch,
				       struct execution_control_state *ecs);
static void handle_step_into_function_backward (struct gdbarch *gdbarch,
						struct execution_control_state *ecs);
static void handle_signal_stop (struct execution_control_state *ecs);
static void check_exception_resume (struct execution_control_state *,
				    struct frame_info *);

static void end_stepping_range (struct execution_control_state *ecs);
static void stop_waiting (struct execution_control_state *ecs);
static void keep_going (struct execution_control_state *ecs);
static void process_event_stop_test (struct execution_control_state *ecs);
static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
/* This function is attached as a "thread_stop_requested" observer.
   Clean up local state that assumed the PTID was to be resumed, and
   report the stop to the frontend.  */

static void
infrun_thread_stop_requested (ptid_t ptid)
{
  process_stratum_target *curr_target = current_inferior ()->process_target ();

  /* PTID was requested to stop.  If the thread was already stopped,
     but the user/frontend doesn't know about that yet (e.g., the
     thread had been temporarily paused for some step-over), set up
     for reporting the stop now.  */
  for (thread_info *tp : all_threads (curr_target, ptid))
    {
      if (tp->state != THREAD_RUNNING)
	continue;

      if (tp->executing ())
	continue;

      /* Remove matching threads from the step-over queue, so
	 start_step_over doesn't try to resume them
	 automatically.  */
      if (thread_is_in_step_over_chain (tp))
	global_thread_step_over_chain_remove (tp);

      /* If the thread is stopped, but the user/frontend doesn't
	 know about that yet, queue a pending event, as if the
	 thread had just stopped now.  Unless the thread already had
	 a pending event.  */
      if (!tp->has_pending_waitstatus ())
	{
	  target_waitstatus ws;
	  ws.set_stopped (GDB_SIGNAL_0);
	  tp->set_pending_waitstatus (ws);
	}

      /* Clear the inline-frame state, since we're re-processing the
	 stop.  */
      clear_inline_frame_state (tp);

      /* If this thread was paused because some other thread was
	 doing an inline-step over, let that finish first.  Once
	 that happens, we'll restart all threads and consume pending
	 stop events then.  */
      if (step_over_info_valid_p ())
	continue;

      /* Otherwise we can process the (new) pending event now.  Set
	 it so this pending event is considered by
	 do_target_wait.  */
      tp->set_resumed (true);
    }
}

static void
infrun_thread_thread_exit (struct thread_info *tp, int silent)
{
  if (target_last_proc_target == tp->inf->process_target ()
      && target_last_wait_ptid == tp->ptid)
    nullify_last_target_wait_ptid ();
}
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}

/* If the target still has execution, call FUNC for each thread that
   just stopped.  In all-stop, that's all the non-exited threads; in
   non-stop, that's the current thread, only.  */

typedef void (*for_each_just_stopped_thread_callback_func)
  (struct thread_info *tp);

static void
for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
{
  if (!target_has_execution () || inferior_ptid == null_ptid)
    return;

  if (target_is_non_stop_p ())
    {
      /* If in non-stop mode, only the current thread stopped.  */
      func (inferior_thread ());
    }
  else
    {
      /* In all-stop mode, all threads have stopped.  */
      for (thread_info *tp : all_non_exited_threads ())
	func (tp);
    }
}

/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped.  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}

/* Delete the single-step breakpoints of the threads that just
   stopped.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
/* See infrun.h.  */

void
print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
                           const struct target_waitstatus &ws)
{
  infrun_debug_printf ("target_wait (%s [%s], status) =",
                       waiton_ptid.to_string ().c_str (),
                       target_pid_to_str (waiton_ptid).c_str ());
  infrun_debug_printf (" %s [%s],",
                       result_ptid.to_string ().c_str (),
                       target_pid_to_str (result_ptid).c_str ());
  infrun_debug_printf (" %s", ws.to_string ().c_str ());
}

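/* For illustration, with "set debug infrun on", the three calls above
   produce output along these lines (hypothetical session; the exact
   ptid and status strings depend on the target):

     [infrun] print_target_wait_results: target_wait (-1.0.0 [process -1], status) =
     [infrun] print_target_wait_results:  2545.2545.0 [Thread 2545],
     [infrun] print_target_wait_results:  stopped, sig = GDB_SIGNAL_TRAP  */
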
/* Select a thread at random, out of those which are resumed and have
   had events.  */

static struct thread_info *
random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
{
  process_stratum_target *proc_target = inf->process_target ();
  thread_info *thread
    = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);

  if (thread == nullptr)
    {
      infrun_debug_printf ("None found.");
      return nullptr;
    }

  infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
  gdb_assert (thread->resumed ());
  gdb_assert (thread->has_pending_waitstatus ());

  return thread;
}

/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  INF is the inferior we're using to call target_wait
   on.  */

static ptid_t
do_target_wait_1 (inferior *inf, ptid_t ptid,
                  target_waitstatus *status, target_wait_flags options)
{
  struct thread_info *tp;

  /* We know that we are looking for an event in the target of inferior
     INF, but we don't know which thread the event might come from.  As
     such we want to make sure that INFERIOR_PTID is reset so that none of
     the wait code relies on it - doing so is always a mistake.  */
  switch_to_inferior_no_thread (inf);

  /* First check if there is a resumed thread with a wait status
     pending.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    {
      tp = random_pending_event_thread (inf, ptid);
    }
  else
    {
      infrun_debug_printf ("Waiting for specific thread %s.",
                           ptid.to_string ().c_str ());

      /* We have a specific thread to check.  */
      tp = find_thread_ptid (inf, ptid);
      gdb_assert (tp != NULL);
      if (!tp->has_pending_waitstatus ())
        tp = NULL;
    }

  if (tp != NULL
      && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
          || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct regcache *regcache = get_thread_regcache (tp);
      struct gdbarch *gdbarch = regcache->arch ();
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != tp->stop_pc ())
        {
          infrun_debug_printf ("PC of %s changed.  was=%s, now=%s",
                               tp->ptid.to_string ().c_str (),
                               paddress (gdbarch, tp->stop_pc ()),
                               paddress (gdbarch, pc));
          discard = 1;
        }
      else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
        {
          infrun_debug_printf ("previous breakpoint of %s, at %s gone",
                               tp->ptid.to_string ().c_str (),
                               paddress (gdbarch, pc));
          discard = 1;
        }

      if (discard)
        {
          infrun_debug_printf ("pending event of %s cancelled.",
                               tp->ptid.to_string ().c_str ());

          tp->clear_pending_waitstatus ();
          target_waitstatus ws;
          ws.set_spurious ();
          tp->set_pending_waitstatus (ws);
          tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
        }
    }

  if (tp != NULL)
    {
      infrun_debug_printf ("Using pending wait status %s for %s.",
                           tp->pending_waitstatus ().to_string ().c_str (),
                           tp->ptid.to_string ().c_str ());

      /* Now that we've selected our final event LWP, un-adjust its PC
         if it was a software breakpoint (and the target doesn't
         always adjust the PC itself).  */
      if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
          && !target_supports_stopped_by_sw_breakpoint ())
        {
          struct regcache *regcache;
          struct gdbarch *gdbarch;
          int decr_pc;

          regcache = get_thread_regcache (tp);
          gdbarch = regcache->arch ();
          decr_pc = gdbarch_decr_pc_after_break (gdbarch);
          if (decr_pc != 0)
            {
              CORE_ADDR pc;

              pc = regcache_read_pc (regcache);
              regcache_write_pc (regcache, pc + decr_pc);
            }
        }

      tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
      *status = tp->pending_waitstatus ();
      tp->clear_pending_waitstatus ();

      /* Wake up the event loop again, until all pending events are
         processed.  */
      if (target_is_async_p ())
        mark_async_event_handler (infrun_async_inferior_event_token);
      return tp->ptid;
    }

  /* But if we don't find one, we'll have to wait.  */

  /* We can't ask a non-async target to do a non-blocking wait, so this will be
     a blocking wait.  */
  if (!target_can_async_p ())
    options &= ~TARGET_WNOHANG;

  return target_wait (ptid, status, options);
}

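/* For illustration, a hypothetical caller draining one event from INF
   without blocking could look like the snippet below; any pending
   waitstatus queued on a thread is consumed before the target itself
   is queried.  handle_the_event is a hypothetical handler, not a real
   function in this file.

     target_waitstatus ws;
     ptid_t event_ptid
       = do_target_wait_1 (inf, minus_one_ptid, &ws, TARGET_WNOHANG);
     if (ws.kind () != TARGET_WAITKIND_IGNORE)
       handle_the_event (event_ptid, ws);  */
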
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  Polls for events from all inferiors/targets.  */

static bool
do_target_wait (execution_control_state *ecs, target_wait_flags options)
{
  int num_inferiors = 0;
  int random_selector;

  /* For fairness, we pick the first inferior/target to poll at random
     out of all inferiors that may report events, and then continue
     polling the rest of the inferior list starting from that one in a
     circular fashion until the whole list is polled once.  */

  auto inferior_matches = [] (inferior *inf)
  {
    return inf->process_target () != nullptr;
  };

  /* First see how many matching inferiors we have.  */
  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      num_inferiors++;

  if (num_inferiors == 0)
    {
      ecs->ws.set_ignore ();
      return false;
    }

  /* Now randomly pick an inferior out of those that matched.  */
  random_selector = (int)
    ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));

  if (num_inferiors > 1)
    infrun_debug_printf ("Found %d inferiors, starting at #%d",
                         num_inferiors, random_selector);

  /* Select the Nth inferior that matched.  */
  inferior *selected = nullptr;

  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      if (random_selector-- == 0)
        {
          selected = inf;
          break;
        }

  /* Now poll for events out of each of the matching inferior's
     targets, starting from the selected one.  */

  auto do_wait = [&] (inferior *inf)
  {
    ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, options);
    ecs->target = inf->process_target ();
    return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);
  };

  /* Needed in 'all-stop + target-non-stop' mode, because we end up
     here spuriously after the target is all stopped and we've already
     reported the stop to the user, polling for events.  */
  scoped_restore_current_thread restore_thread;

  intrusive_list_iterator<inferior> start
    = inferior_list.iterator_to (*selected);

  for (intrusive_list_iterator<inferior> it = start;
       it != inferior_list.end ();
       ++it)
    {
      inferior *inf = &*it;

      if (inferior_matches (inf) && do_wait (inf))
        return true;
    }

  for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
       it != start;
       ++it)
    {
      inferior *inf = &*it;

      if (inferior_matches (inf) && do_wait (inf))
        return true;
    }

  ecs->ws.set_ignore ();
  return false;
}

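/* For illustration: with three matching inferiors #1, #2 and #3, and
   random_selector drawn as 1, the two loops above poll #2, then #3,
   then wrap around to #1.  Every matching inferior is thus polled
   exactly once per call, from a randomized starting point, which keeps
   event reporting fair across inferiors.  */
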
/* An event reported by wait_one.  */

struct wait_one_event
{
  /* The target the event came out of.  */
  process_stratum_target *target;

  /* The PTID the event was for.  */
  ptid_t ptid;

  /* The waitstatus.  */
  target_waitstatus ws;
};

static bool handle_one (const wait_one_event &event);

/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = ptid_t (inf->pid);
  scoped_restore_current_thread restore_thread;

  scoped_restore restore_detaching
    = make_scoped_restore (&inf->detaching, true);

  /* Remove all threads of INF from the global step-over chain.  We
     want to stop any ongoing step-over, not start any new one.  */
  thread_step_over_list_safe_range range
    = make_thread_step_over_list_safe_range (global_thread_step_over_list);

  for (thread_info *tp : range)
    if (tp->inf == inf)
      {
        infrun_debug_printf ("removing thread %s from global step over chain",
                             tp->ptid.to_string ().c_str ());
        global_thread_step_over_chain_remove (tp);
      }

  /* If we were already in the middle of an inline step-over, and the
     thread stepping belongs to the inferior we're detaching, we need
     to restart the threads of other inferiors.  */
  if (step_over_info.thread != -1)
    {
      infrun_debug_printf ("inline step-over in-process while detaching");

      thread_info *thr = find_thread_global_id (step_over_info.thread);
      if (thr->inf == inf)
        {
          /* Since we removed threads of INF from the step-over chain,
             we know this won't start a step-over for INF.  */
          clear_step_over_info ();

          if (target_is_non_stop_p ())
            {
              /* Start a new step-over in another thread if there's
                 one that needs it.  */
              start_step_over ();

              /* Restart all other threads (except the
                 previously-stepping thread, since that one is still
                 running).  */
              if (!step_over_info_valid_p ())
                restart_threads (thr);
            }
        }
    }

  if (displaced_step_in_progress (inf))
    {
      infrun_debug_printf ("displaced-stepping in-process while detaching");

      /* Stop threads currently displaced stepping, aborting it.  */
      for (thread_info *thr : inf->non_exited_threads ())
        {
          if (thr->displaced_step_state.in_progress ())
            {
              if (thr->executing ())
                {
                  if (!thr->stop_requested)
                    {
                      target_stop (thr->ptid);
                      thr->stop_requested = true;
                    }
                }
              else
                thr->set_resumed (false);
            }
        }

      while (displaced_step_in_progress (inf))
        {
          wait_one_event event;

          event.target = inf->process_target ();
          event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);

          if (debug_infrun)
            print_target_wait_results (pid_ptid, event.ptid, event.ws);

          handle_one (event);
        }

      /* It's OK to leave some of the threads of INF stopped, since
         they'll be detached shortly.  */
    }
}

/* Wait for control to return from inferior to debugger.

   If the inferior gets a signal, we may decide to start it up again
   instead of returning.  That is why there is a loop in this function.
   When this function actually returns, it means the inferior
   should be left stopped and GDB should read more commands.  */

static void
wait_for_inferior (inferior *inf)
{
  infrun_debug_printf ("wait_for_inferior ()");

  SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };

  /* If an error happens while handling the event, propagate GDB's
     knowledge of the executing state to the frontend/user running
     state.  */
  scoped_finish_thread_state finish_state
    (inf->process_target (), minus_one_ptid);

  while (1)
    {
      struct execution_control_state ecss;
      struct execution_control_state *ecs = &ecss;

      overlay_cache_invalid = 1;

      /* Flush target cache before starting to handle each event.
         Target was running and cache could be stale.  This is just a
         heuristic.  Running threads may modify target memory, but we
         don't get any event.  */
      target_dcache_invalidate ();

      ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, 0);
      ecs->target = inf->process_target ();

      if (debug_infrun)
        print_target_wait_results (minus_one_ptid, ecs->ptid, ecs->ws);

      /* Now figure out what to do with the result.  */
      handle_inferior_event (ecs);

      if (!ecs->wait_some_more)
        break;
    }

  /* No error, don't finish the state yet.  */
  finish_state.release ();
}

/* Cleanup that reinstalls the readline callback handler, if the
   target is running in the background.  If while handling the target
   event something triggered a secondary prompt, like e.g., a
   pagination prompt, we'll have removed the callback handler (see
   gdb_readline_wrapper_line).  Need to do this as we go back to the
   event loop, ready to process further input.  Note we don't
   reinstall if the handler hasn't actually been removed, because
   calling rl_callback_handler_install resets the line buffer, thus
   losing input.  */

static void
reinstall_readline_callback_handler_cleanup ()
{
  struct ui *ui = current_ui;

  if (!ui->async)
    {
      /* We're not going back to the top level event loop yet.  Don't
         install the readline callback, as it'd prep the terminal,
         readline-style (raw, noecho) (e.g., --batch).  We'll install
         it the next time the prompt is displayed, when we're ready
         for input.  */
      return;
    }

  if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
    gdb_rl_callback_handler_reinstall ();
}

/* Clean up the FSMs of threads that are now stopped.  In non-stop,
   that's just the event thread.  In all-stop, that's all threads.  */

static void
clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
{
  if (ecs->event_thread != nullptr
      && ecs->event_thread->thread_fsm () != nullptr)
    ecs->event_thread->thread_fsm ()->clean_up (ecs->event_thread);

  if (!non_stop)
    {
      for (thread_info *thr : all_non_exited_threads ())
        {
          if (thr->thread_fsm () == nullptr)
            continue;
          if (thr == ecs->event_thread)
            continue;

          switch_to_thread (thr);
          thr->thread_fsm ()->clean_up (thr);
        }

      if (ecs->event_thread != nullptr)
        switch_to_thread (ecs->event_thread);
    }
}

/* Helper for all_uis_check_sync_execution_done that works on the
   current UI.  */

static void
check_curr_ui_sync_execution_done (void)
{
  struct ui *ui = current_ui;

  if (ui->prompt_state == PROMPT_NEEDED
      && ui->async
      && !gdb_in_secondary_prompt_p (ui))
    {
      target_terminal::ours ();
      gdb::observers::sync_execution_done.notify ();
      ui_register_input_event_handler (ui);
    }
}

/* See infrun.h.  */

void
all_uis_check_sync_execution_done (void)
{
  SWITCH_THRU_ALL_UIS ()
    {
      check_curr_ui_sync_execution_done ();
    }
}

/* See infrun.h.  */

void
all_uis_on_sync_execution_starting (void)
{
  SWITCH_THRU_ALL_UIS ()
    {
      if (current_ui->prompt_state == PROMPT_NEEDED)
        async_disable_stdin ();
    }
}

/* Asynchronous version of wait_for_inferior.  It is called by the
   event loop whenever a change of state is detected on the file
   descriptor corresponding to the target.  It can be called more than
   once to complete a single execution command.  In such cases we need
   to keep the state in a global variable ECSS.  If it is the last time
   that this function is called for a single execution command, then
   report to the user that the inferior has stopped, and do the
   necessary cleanups.  */

void
fetch_inferior_event ()
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;
  int cmd_done = 0;

  /* Events are always processed with the main UI as current UI.  This
     way, warnings, debug output, etc. are always consistently sent to
     the main console.  */
  scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);

  /* Temporarily disable pagination.  Otherwise, the user would be
     given an option to press 'q' to quit, which would cause an early
     exit and could leave GDB in a half-baked state.  */
  scoped_restore save_pagination
    = make_scoped_restore (&pagination_enabled, false);

  /* End up with readline processing input, if necessary.  */
  {
    SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };

    /* We're handling a live event, so make sure we're doing live
       debugging.  If we're looking at traceframes while the target is
       running, we're going to need to get back to that mode after
       handling the event.  */
    gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
    if (non_stop)
      {
        maybe_restore_traceframe.emplace ();
        set_current_traceframe (-1);
      }

    /* The user/frontend should not notice a thread switch due to
       internal events.  Make sure we revert to the user selected
       thread and frame after handling the event and running any
       breakpoint commands.  */
    scoped_restore_current_thread restore_thread;

    overlay_cache_invalid = 1;
    /* Flush target cache before starting to handle each event.  Target
       was running and cache could be stale.  This is just a heuristic.
       Running threads may modify target memory, but we don't get any
       event.  */
    target_dcache_invalidate ();

    scoped_restore save_exec_dir
      = make_scoped_restore (&execution_direction,
                             target_execution_direction ());

    /* Allow targets to pause their resumed threads while we handle
       the event.  */
    scoped_disable_commit_resumed disable_commit_resumed ("handling event");

    if (!do_target_wait (ecs, TARGET_WNOHANG))
      {
        infrun_debug_printf ("do_target_wait returned no event");
        disable_commit_resumed.reset_and_commit ();
        return;
      }

    gdb_assert (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);

    /* Switch to the target that generated the event, so we can do
       target calls.  */
    switch_to_target_no_thread (ecs->target);

    if (debug_infrun)
      print_target_wait_results (minus_one_ptid, ecs->ptid, ecs->ws);

    /* If an error happens while handling the event, propagate GDB's
       knowledge of the executing state to the frontend/user running
       state.  */
    ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs->ptid;
    scoped_finish_thread_state finish_state (ecs->target, finish_ptid);

    /* These get executed before the scoped_restore_current_thread
       above, so they still apply to the thread which has thrown the
       exception.  */
    auto defer_bpstat_clear
      = make_scope_exit (bpstat_clear_actions);
    auto defer_delete_threads
      = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);

    /* Now figure out what to do with the result.  */
    handle_inferior_event (ecs);

    if (!ecs->wait_some_more)
      {
        struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
        bool should_stop = true;
        struct thread_info *thr = ecs->event_thread;

        delete_just_stopped_threads_infrun_breakpoints ();

        if (thr != nullptr && thr->thread_fsm () != nullptr)
          should_stop = thr->thread_fsm ()->should_stop (thr);

        if (!should_stop)
          {
            keep_going (ecs);
          }
        else
          {
            bool should_notify_stop = true;
            int proceeded = 0;

            clean_up_just_stopped_threads_fsms (ecs);

            if (thr != nullptr && thr->thread_fsm () != nullptr)
              should_notify_stop
                = thr->thread_fsm ()->should_notify_stop ();

            if (should_notify_stop)
              {
                /* We may not find an inferior if this was a process exit.  */
                if (inf == NULL || inf->control.stop_soon == NO_STOP_QUIETLY)
                  proceeded = normal_stop ();
              }

            if (!proceeded)
              {
                inferior_event_handler (INF_EXEC_COMPLETE);
                cmd_done = 1;
              }

            /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
               previously selected thread is gone.  We have two
               choices - switch to no thread selected, or restore the
               previously selected thread (now exited).  We chose the
               latter, just because that's what GDB used to do.  After
               this, "info threads" says "The current thread <Thread
               ID 2> has terminated." instead of "No thread
               selected.".  */
            if (!non_stop
                && cmd_done
                && ecs->ws.kind () != TARGET_WAITKIND_NO_RESUMED)
              restore_thread.dont_restore ();
          }
      }

    defer_delete_threads.release ();
    defer_bpstat_clear.release ();

    /* No error, don't finish the thread states yet.  */
    finish_state.release ();

    disable_commit_resumed.reset_and_commit ();

    /* This scope is used to ensure that readline callbacks are
       reinstalled here.  */
  }

  /* If a UI was in sync execution mode, and now isn't, restore its
     prompt (a synchronous execution command has finished, and we're
     ready for input).  */
  all_uis_check_sync_execution_done ();

  if (cmd_done
      && exec_done_display_p
      && (inferior_ptid == null_ptid
          || inferior_thread ()->state != THREAD_RUNNING))
    gdb_printf (_("completed.\n"));
}

/* See infrun.h.  */

void
set_step_info (thread_info *tp, struct frame_info *frame,
               struct symtab_and_line sal)
{
  /* This can be removed once this function no longer implicitly relies on the
     inferior_ptid value.  */
  gdb_assert (inferior_ptid == tp->ptid);

  tp->control.step_frame_id = get_frame_id (frame);
  tp->control.step_stack_frame_id = get_stack_frame_id (frame);

  tp->current_symtab = sal.symtab;
  tp->current_line = sal.line;

  infrun_debug_printf
    ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
     tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>",
     tp->current_line,
     tp->control.step_frame_id.to_string ().c_str (),
     tp->control.step_stack_frame_id.to_string ().c_str ());
}

/* Clear context switchable stepping state.  */

void
init_thread_stepping_state (struct thread_info *tss)
{
  tss->stepped_breakpoint = 0;
  tss->stepping_over_breakpoint = 0;
  tss->stepping_over_watchpoint = 0;
  tss->step_after_step_resume_breakpoint = 0;
}

/* See infrun.h.  */

void
set_last_target_status (process_stratum_target *target, ptid_t ptid,
                        const target_waitstatus &status)
{
  target_last_proc_target = target;
  target_last_wait_ptid = ptid;
  target_last_waitstatus = status;
}

/* See infrun.h.  */

void
get_last_target_status (process_stratum_target **target, ptid_t *ptid,
                        target_waitstatus *status)
{
  if (target != nullptr)
    *target = target_last_proc_target;
  if (ptid != nullptr)
    *ptid = target_last_wait_ptid;
  if (status != nullptr)
    *status = target_last_waitstatus;
}

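/* For illustration, callers that only need some of the fields may pass
   nullptr for the rest (hypothetical snippet):

     ptid_t last_ptid;
     get_last_target_status (nullptr, &last_ptid, nullptr);  */
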
/* See infrun.h.  */

void
nullify_last_target_wait_ptid (void)
{
  target_last_proc_target = nullptr;
  target_last_wait_ptid = minus_one_ptid;
  target_last_waitstatus = {};
}

/* Switch thread contexts.  */

static void
context_switch (execution_control_state *ecs)
{
  if (ecs->ptid != inferior_ptid
      && (inferior_ptid == null_ptid
          || ecs->event_thread != inferior_thread ()))
    {
      infrun_debug_printf ("Switching context from %s to %s",
                           inferior_ptid.to_string ().c_str (),
                           ecs->ptid.to_string ().c_str ());
    }

  switch_to_thread (ecs->event_thread);
}

/* If the target can't tell whether we've hit breakpoints
   (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
   check whether that could have been caused by a breakpoint.  If so,
   adjust the PC, per gdbarch_decr_pc_after_break.  */

static void
adjust_pc_after_break (struct thread_info *thread,
                       const target_waitstatus &ws)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ws.kind () != TARGET_WAITKIND_STOPPED)
    return;

  if (ws.sig () != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

       B1         0x08000000 :   INSN1
       B2         0x08000001 :   INSN2
                  0x08000002 :   INSN3
       PC ->      0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

       B1         0x08000000 :   INSN1
       B2   PC -> 0x08000001 :   INSN2
                  0x08000002 :   INSN3
                  0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behaviour.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it.  Targets that can tell also adjust the PC
     themselves.  */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail.  E.g., the breakpoint could have been
     removed since.  Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address.  */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (thread);
  gdbarch = regcache->arch ();

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  const address_space *aspace = regcache->aspace ();

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fall back to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued.  */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  Note
     this is a heuristic and can thus get confused.  The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above).  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (target_is_non_stop_p ()
          && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;

      if (record_full_is_used ())
        restore_operation_disable.emplace
          (record_full_gdb_operation_disable_set ());

      /* When using hardware single-step, a SIGTRAP is reported for both
         a completed single-step and a software breakpoint.  Need to
         differentiate between the two, as the latter needs adjusting
         but the former does not.

         The SIGTRAP can be due to a completed hardware single-step only if
          - we didn't insert software single-step breakpoints
          - this thread is currently being stepped

         If any of these events did not occur, we must have stopped due
         to hitting a software breakpoint, and have to back up to the
         breakpoint address.

         As a special case, we could have hardware single-stepped a
         software breakpoint.  In this case (prev_pc == breakpoint_pc),
         we also need to back up to the breakpoint address.  */
      if (thread_has_single_step_breakpoints_set (thread)
          || !currently_stepping (thread)
          || (thread->stepped_breakpoint
              && thread->prev_pc == breakpoint_pc))
        regcache_write_pc (regcache, breakpoint_pc);
    }
}

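/* For illustration: on an architecture where
   gdbarch_decr_pc_after_break is 1 (e.g. x86, whose breakpoint
   instruction is the one-byte int3), hitting a breakpoint planted at
   0x08000000 reports a PC of 0x08000001.  If the target can't itself
   report "stopped by SW breakpoint", the code above notices the
   breakpoint at PC - 1 and rewinds the PC to 0x08000000, so GDB
   resumes from the original instruction.  */
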
static bool
stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
{
  for (frame = get_prev_frame (frame);
       frame != NULL;
       frame = get_prev_frame (frame))
    {
      if (frame_id_eq (get_frame_id (frame), step_frame_id))
        return true;

      if (get_frame_type (frame) != INLINE_FRAME)
        break;
    }

  return false;
}

/* Look for an inline frame that is marked for skip.
   If PREV_FRAME is TRUE start at the previous frame,
   otherwise start at the current frame.  Stop at the
   first non-inline frame, or at the frame where the
   step started.  */

static bool
inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
{
  struct frame_info *frame = get_current_frame ();

  if (prev_frame)
    frame = get_prev_frame (frame);

  for (; frame != NULL; frame = get_prev_frame (frame))
    {
      const char *fn = NULL;
      symtab_and_line sal;
      struct symbol *sym;

      if (frame_id_eq (get_frame_id (frame), tp->control.step_frame_id))
        break;
      if (get_frame_type (frame) != INLINE_FRAME)
        break;

      sal = find_frame_sal (frame);
      sym = get_frame_function (frame);

      if (sym != NULL)
        fn = sym->print_name ();

      if (sal.line != 0
          && function_name_is_marked_for_skip (fn, sal))
        return true;
    }

  return false;
}

/* If the event thread has the stop requested flag set, pretend it
   stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
   target_stop).  */

static bool
handle_stop_requested (struct execution_control_state *ecs)
{
  if (ecs->event_thread->stop_requested)
    {
      ecs->ws.set_stopped (GDB_SIGNAL_0);
      handle_signal_stop (ecs);
      return true;
    }
  return false;
}

/* Auxiliary function that handles syscall entry/return events.
   It returns true if the inferior should keep going (and GDB
   should ignore the event), or false if the event deserves to be
   processed.  */

static bool
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  int syscall_number;

  context_switch (ecs);

  regcache = get_thread_regcache (ecs->event_thread);
  syscall_number = ecs->ws.syscall_number ();
  ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));

  if (catch_syscall_enabled () > 0
      && catching_syscall_number (syscall_number))
    {
      infrun_debug_printf ("syscall number=%d", syscall_number);

      ecs->event_thread->control.stop_bpstat
        = bpstat_stop_status_nowatch (regcache->aspace (),
                                      ecs->event_thread->stop_pc (),
                                      ecs->event_thread, ecs->ws);

      if (handle_stop_requested (ecs))
        return false;

      if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
        {
          /* Catchpoint hit.  */
          return false;
        }
    }

  if (handle_stop_requested (ecs))
    return false;

  /* If no catchpoint triggered for this, then keep going.  */
  keep_going (ecs);

  return true;
}

/* Lazily fill in the execution_control_state's stop_func_* fields.  */

static void
fill_in_stop_func (struct gdbarch *gdbarch,
                   struct execution_control_state *ecs)
{
  if (!ecs->stop_func_filled_in)
    {
      const block *block;
      const general_symbol_info *gsi;

      /* Don't care about return value; stop_func_start and stop_func_name
         will both be 0 if it doesn't work.  */
      find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
                                    &gsi,
                                    &ecs->stop_func_start,
                                    &ecs->stop_func_end,
                                    &block);
      ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();

      /* The call to find_pc_partial_function_sym, above, will set
         stop_func_start and stop_func_end to the start and end
         of the range containing the stop pc.  If this range
         contains the entry pc for the block (which is always the
         case for contiguous blocks), advance stop_func_start past
         the function's start offset and entrypoint.  Note that
         stop_func_start is NOT advanced when in a range of a
         non-contiguous block that does not contain the entry pc.  */
      if (block != nullptr
          && ecs->stop_func_start <= BLOCK_ENTRY_PC (block)
          && BLOCK_ENTRY_PC (block) < ecs->stop_func_end)
        {
          ecs->stop_func_start
            += gdbarch_deprecated_function_start_offset (gdbarch);

          if (gdbarch_skip_entrypoint_p (gdbarch))
            ecs->stop_func_start
              = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
        }

      ecs->stop_func_filled_in = 1;
    }
}

/* Return the STOP_SOON field of the inferior pointed at by ECS.  */

static enum stop_kind
get_inferior_stop_soon (execution_control_state *ecs)
{
  struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);

  gdb_assert (inf != NULL);
  return inf->control.stop_soon;
}

/* Poll for one event out of the current target.  Store the resulting
   waitstatus in WS, and return the event ptid.  Does not block.  */

static ptid_t
poll_one_curr_target (struct target_waitstatus *ws)
{
  ptid_t event_ptid;

  overlay_cache_invalid = 1;

  /* Flush target cache before starting to handle each event.
     Target was running and cache could be stale.  This is just a
     heuristic.  Running threads may modify target memory, but we
     don't get any event.  */
  target_dcache_invalidate ();

  event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);

  if (debug_infrun)
    print_target_wait_results (minus_one_ptid, event_ptid, *ws);

  return event_ptid;
}

/* Wait for one event out of any target.  */

static wait_one_event
wait_one ()
{
  while (1)
    {
      for (inferior *inf : all_inferiors ())
        {
          process_stratum_target *target = inf->process_target ();
          if (target == NULL
              || !target->is_async_p ()
              || !target->threads_executing)
            continue;

          switch_to_inferior_no_thread (inf);

          wait_one_event event;
          event.target = target;
          event.ptid = poll_one_curr_target (&event.ws);

          if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
            {
              /* If nothing is resumed, remove the target from the
                 event loop.  */
              target_async (0);
            }
          else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
            return event;
        }

      /* Block waiting for some event.  */

      fd_set readfds;
      int nfds = 0;

      FD_ZERO (&readfds);

      for (inferior *inf : all_inferiors ())
        {
          process_stratum_target *target = inf->process_target ();
          if (target == NULL
              || !target->is_async_p ()
              || !target->threads_executing)
            continue;

          int fd = target->async_wait_fd ();
          FD_SET (fd, &readfds);
          if (nfds <= fd)
            nfds = fd + 1;
        }

      if (nfds == 0)
        {
          /* No waitable targets left.  All must be stopped.  */
          target_waitstatus ws;
          ws.set_no_resumed ();
          return {NULL, minus_one_ptid, std::move (ws)};
        }

      QUIT;

      int numfds = interruptible_select (nfds, &readfds, 0, NULL, 0);
      if (numfds < 0)
        {
          if (errno == EINTR)
            continue;
          else
            perror_with_name ("interruptible_select");
        }
    }
}

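/* For illustration, callers typically drain events in a loop until no
   resumed threads remain; this is the pattern used by
   prepare_for_detach above and stop_all_threads below:

     while (...some threads still need to stop...)
       {
         wait_one_event event = wait_one ();
         if (handle_one (event))
           break;
       }  */
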
/* Save the thread's event and stop reason to process it later.  */

static void
save_waitstatus (struct thread_info *tp, const target_waitstatus &ws)
{
  infrun_debug_printf ("saving status %s for %s",
                       ws.to_string ().c_str (),
                       tp->ptid.to_string ().c_str ());

  /* Record for later.  */
  tp->set_pending_waitstatus (ws);

  if (ws.kind () == TARGET_WAITKIND_STOPPED
      && ws.sig () == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache = get_thread_regcache (tp);
      const address_space *aspace = regcache->aspace ();
      CORE_ADDR pc = regcache_read_pc (regcache);

      adjust_pc_after_break (tp, tp->pending_waitstatus ());

      scoped_restore_current_thread restore_thread;
      switch_to_thread (tp);

      if (target_stopped_by_watchpoint ())
        tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
      else if (target_supports_stopped_by_sw_breakpoint ()
               && target_stopped_by_sw_breakpoint ())
        tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (target_supports_stopped_by_hw_breakpoint ()
               && target_stopped_by_hw_breakpoint ())
        tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_hw_breakpoint ()
               && hardware_breakpoint_inserted_here_p (aspace, pc))
        tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_sw_breakpoint ()
               && software_breakpoint_inserted_here_p (aspace, pc))
        tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (!thread_has_single_step_breakpoints_set (tp)
               && currently_stepping (tp))
        tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
    }
}

/* Mark the non-executing threads accordingly.  In all-stop, all
   threads of all processes are stopped when we get any event
   reported.  In non-stop mode, only the event thread stops.  */

static void
mark_non_executing_threads (process_stratum_target *target,
                            ptid_t event_ptid,
                            const target_waitstatus &ws)
{
  ptid_t mark_ptid;

  if (!target_is_non_stop_p ())
    mark_ptid = minus_one_ptid;
  else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
           || ws.kind () == TARGET_WAITKIND_EXITED)
    {
      /* If we're handling a process exit in non-stop mode, even
         though threads haven't been deleted yet, one would think
         that there is nothing to do, as threads of the dead process
         will be soon deleted, and threads of any other process were
         left running.  However, on some targets, threads survive a
         process exit event.  E.g., for the "checkpoint" command,
         when the current checkpoint/fork exits, linux-fork.c
         automatically switches to another fork from within
         target_mourn_inferior, by associating the same
         inferior/thread to another fork.  We haven't mourned yet at
         this point, but we must mark any threads left in the
         process as not-executing so that finish_thread_state marks
         them stopped (in the user's perspective) if/when we present
         the stop to the user.  */
      mark_ptid = ptid_t (event_ptid.pid ());
    }
  else
    mark_ptid = event_ptid;

  set_executing (target, mark_ptid, false);

  /* Likewise the resumed flag.  */
  set_resumed (target, mark_ptid, false);
}

/* Handle one event after stopping threads.  If the eventing thread
   reports back any interesting event, we leave it pending.  If the
   eventing thread was in the middle of a displaced step, we
   cancel/finish it, and unless the thread's inferior is being
   detached, put the thread back in the step-over chain.  Returns true
   if there are no resumed threads left in the target (thus there's no
   point in waiting further), false otherwise.  */

static bool
handle_one (const wait_one_event &event)
{
  infrun_debug_printf
    ("%s %s", event.ws.to_string ().c_str (),
     event.ptid.to_string ().c_str ());

  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      /* All resumed threads exited.  */
      return true;
    }
  else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
           || event.ws.kind () == TARGET_WAITKIND_EXITED
           || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
    {
      /* One thread/process exited/signalled.  */

      thread_info *t = nullptr;

      /* The target may have reported just a pid.  If so, try
         the first non-exited thread.  */
      if (event.ptid.is_pid ())
        {
          int pid = event.ptid.pid ();
          inferior *inf = find_inferior_pid (event.target, pid);
          for (thread_info *tp : inf->non_exited_threads ())
            {
              t = tp;
              break;
            }

          /* If there is no available thread, the event would
             have to be appended to a per-inferior event list,
             which does not exist (and if it did, we'd have
             to adjust the run control commands to be able to
             resume such an inferior).  We assert here instead
             of going into an infinite loop.  */
          gdb_assert (t != nullptr);

          infrun_debug_printf
            ("using %s", t->ptid.to_string ().c_str ());
        }
      else
        {
          t = find_thread_ptid (event.target, event.ptid);
          /* Check if this is the first time we see this thread.
             Don't bother adding if it individually exited.  */
          if (t == nullptr
              && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
            t = add_thread (event.target, event.ptid);
        }

      if (t != nullptr)
        {
          /* Set the threads as non-executing to avoid
             another stop attempt on them.  */
          switch_to_thread_no_regs (t);
          mark_non_executing_threads (event.target, event.ptid,
                                      event.ws);
          save_waitstatus (t, event.ws);
          t->stop_requested = false;
        }
    }
  else
    {
      thread_info *t = find_thread_ptid (event.target, event.ptid);
      if (t == NULL)
        t = add_thread (event.target, event.ptid);

      t->stop_requested = 0;
      t->set_executing (false);
      t->set_resumed (false);
      t->control.may_range_step = 0;

      /* This may be the first time we see the inferior report
         a stop.  */
      if (t->inf->needs_setup)
        {
          switch_to_thread_no_regs (t);
          setup_inferior (0);
        }

      if (event.ws.kind () == TARGET_WAITKIND_STOPPED
          && event.ws.sig () == GDB_SIGNAL_0)
        {
          /* We caught the event that we intended to catch, so
             there's no event to save as pending.  */

          if (displaced_step_finish (t, GDB_SIGNAL_0)
              == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
            {
              /* Add it back to the step-over queue.  */
              infrun_debug_printf
                ("displaced-step of %s canceled",
                 t->ptid.to_string ().c_str ());
              t->control.trap_expected = 0;
              if (!t->inf->detaching)
                global_thread_step_over_chain_enqueue (t);
            }
        }
      else
        {
          enum gdb_signal sig;
          struct regcache *regcache;

          infrun_debug_printf
            ("target_wait %s, saving status for %s",
             event.ws.to_string ().c_str (),
             t->ptid.to_string ().c_str ());

          /* Record for later.  */
          save_waitstatus (t, event.ws);

          sig = (event.ws.kind () == TARGET_WAITKIND_STOPPED
                 ? event.ws.sig () : GDB_SIGNAL_0);

          if (displaced_step_finish (t, sig)
              == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
            {
              /* Add it back to the step-over queue.  */
              t->control.trap_expected = 0;
              if (!t->inf->detaching)
                global_thread_step_over_chain_enqueue (t);
            }

          regcache = get_thread_regcache (t);
          t->set_stop_pc (regcache_read_pc (regcache));

          infrun_debug_printf ("saved stop_pc=%s for %s "
                               "(currently_stepping=%d)",
                               paddress (target_gdbarch (), t->stop_pc ()),
                               t->ptid.to_string ().c_str (),
                               currently_stepping (t));
        }
    }

  return false;
}

/* See infrun.h.  */

void
stop_all_threads (const char *reason, inferior *inf)
{
  /* We may need multiple passes to discover all threads.  */
  int pass;
  int iterations = 0;

  gdb_assert (exists_non_stop_target ());

  INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
                                 inf != nullptr ? inf->num : -1);

  scoped_restore_current_thread restore_thread;

  /* Enable thread events on relevant targets.  */
  for (auto *target : all_non_exited_process_targets ())
    {
      if (inf != nullptr && inf->process_target () != target)
        continue;

      switch_to_target_no_thread (target);
      target_thread_events (true);
    }

  SCOPE_EXIT
    {
      /* Disable thread events on relevant targets.  */
      for (auto *target : all_non_exited_process_targets ())
        {
          if (inf != nullptr && inf->process_target () != target)
            continue;

          switch_to_target_no_thread (target);
          target_thread_events (false);
        }

      /* Use debug_prefixed_printf directly to get a meaningful function
         name.  */
      if (debug_infrun)
        debug_prefixed_printf ("infrun", "stop_all_threads", "done");
    };

  /* Request threads to stop, and then wait for the stops.  Because
     threads we already know about can spawn more threads while we're
     trying to stop them, and we only learn about new threads when we
     update the thread list, do this in a loop, and keep iterating
     until two passes find no threads that need to be stopped.  */
  for (pass = 0; pass < 2; pass++, iterations++)
    {
      infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
      while (1)
        {
          int waits_needed = 0;

          for (auto *target : all_non_exited_process_targets ())
            {
              if (inf != nullptr && inf->process_target () != target)
                continue;

              switch_to_target_no_thread (target);
              update_thread_list ();
            }

          /* Go through all threads looking for threads that we need
             to tell the target to stop.  */
          for (thread_info *t : all_non_exited_threads ())
            {
              if (inf != nullptr && t->inf != inf)
                continue;

              /* For a single-target setting with an all-stop target,
                 we would not even arrive here.  For a multi-target
                 setting, until GDB is able to handle a mixture of
                 all-stop and non-stop targets, simply skip all-stop
                 targets' threads.  This should be fine due to the
                 protection of 'check_multi_target_resumption'.  */
              switch_to_thread_no_regs (t);
              if (!target_is_non_stop_p ())
                continue;

              if (t->executing ())
                {
                  /* If already stopping, don't request a stop again.
                     We just haven't seen the notification yet.  */
                  if (!t->stop_requested)
                    {
                      infrun_debug_printf (" %s executing, need stop",
                                           t->ptid.to_string ().c_str ());
                      target_stop (t->ptid);
                      t->stop_requested = 1;
                    }
                  else
                    {
                      infrun_debug_printf (" %s executing, already stopping",
                                           t->ptid.to_string ().c_str ());
                    }

                  if (t->stop_requested)
                    waits_needed++;
                }
              else
                {
                  infrun_debug_printf (" %s not executing",
                                       t->ptid.to_string ().c_str ());

                  /* The thread may be not executing, but still be
                     resumed with a pending status to process.  */
                  t->set_resumed (false);
                }
            }

          if (waits_needed == 0)
            break;

          /* If we find new threads on the second iteration, restart
             over.  We want to see two iterations in a row with all
             threads stopped.  */
          if (pass > 0)
            pass = -1;

          for (int i = 0; i < waits_needed; i++)
            {
              wait_one_event event = wait_one ();
              if (handle_one (event))
                break;
            }
        }
    }
}

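/* For illustration: suppose thread T1 spawns a new thread T2 just as
   T1 is being stopped.  Pass 0 stops T1; the next update_thread_list
   call discovers T2 still executing, so a stop is requested for it and
   "pass" is reset to restart the count.  The loop above only
   terminates once two consecutive passes find every thread already
   stopped.  */
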
/* Handle a TARGET_WAITKIND_NO_RESUMED event.  */

static bool
handle_no_resumed (struct execution_control_state *ecs)
{
  if (target_can_async_p ())
    {
      bool any_sync = false;

      for (ui *ui : all_uis ())
        {
          if (ui->prompt_state == PROMPT_BLOCKED)
            {
              any_sync = true;
              break;
            }
        }
      if (!any_sync)
        {
          /* There were no unwaited-for children left in the target, but
             we're not synchronously waiting for events either.  Just
             ignore.  */

          infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
          prepare_to_wait (ecs);
          return true;
        }
    }

  /* Otherwise, if we were running a synchronous execution command, we
     may need to cancel it and give the user back the terminal.

     In non-stop mode, the target can't tell whether we've already
     consumed previous stop events, so it can end up sending us a
     no-resumed event like so:

       #0 - thread 1 is left stopped

       #1 - thread 2 is resumed and hits breakpoint
            -> TARGET_WAITKIND_STOPPED

       #2 - thread 3 is resumed and exits
            this is the last resumed thread, so
            -> TARGET_WAITKIND_NO_RESUMED

       #3 - gdb processes stop for thread 2 and decides to re-resume
            it.

       #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
            thread 2 is now resumed, so the event should be ignored.

     IOW, if the stop for thread 2 doesn't end a foreground command,
     then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
     event.  But it could be that the event meant that thread 2 itself
     (or whatever other thread was the last resumed thread) exited.

     To address this we refresh the thread list and check whether we
     have resumed threads _now_.  In the example above, this removes
     thread 3 from the thread list.  If thread 2 was re-resumed, we
     ignore this event.  If we find no thread resumed, then we cancel
     the synchronous command and show "no unwaited-for children left"
     to the user.  */

  inferior *curr_inf = current_inferior ();

  scoped_restore_current_thread restore_thread;

  for (auto *target : all_non_exited_process_targets ())
    {
      switch_to_target_no_thread (target);
      update_thread_list ();
    }

  /* If:

       - the current target has no thread executing, and
       - the current inferior is native, and
       - the current inferior is the one which has the terminal, and
       - we did nothing,

     then a Ctrl-C from this point on would remain stuck in the
     kernel, until a thread resumes and dequeues it.  That would
     result in the GDB CLI not reacting to Ctrl-C, not able to
     interrupt the program.  To address this, if the current inferior
     no longer has any thread executing, we give the terminal to some
     other inferior that has at least one thread executing.  */
  bool swap_terminal = true;

  /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
     whether to report it to the user.  */
  bool ignore_event = false;

  for (thread_info *thread : all_non_exited_threads ())
    {
      if (swap_terminal && thread->executing ())
        {
          if (thread->inf != curr_inf)
            {
              target_terminal::ours ();

              switch_to_thread (thread);
              target_terminal::inferior ();
            }
          swap_terminal = false;
        }

      if (!ignore_event && thread->resumed ())
        {
          /* Either there were no unwaited-for children left in the
             target at some point, but there are now, or some target
             other than the eventing one has unwaited-for children
             left.  Just ignore.  */
          infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
                               "(ignoring: found resumed)");
          ignore_event = true;
        }

      if (ignore_event && !swap_terminal)
        break;
    }

  if (ignore_event)
    {
      switch_to_inferior_no_thread (curr_inf);
      prepare_to_wait (ecs);
      return true;
    }

  /* Go ahead and report the event.  */
  return false;
}

/* Given an execution control state that has been freshly filled in by
   an event from the inferior, figure out what it means and take
   appropriate action.

   The alternatives are:

   1) stop_waiting and return; to really stop and return to the
   debugger.

   2) keep_going and return; to wait for the next event (set
   ecs->event_thread->stepping_over_breakpoint to 1 to single step
   once).  */

static void
handle_inferior_event (struct execution_control_state *ecs)
{
  /* Make sure that all temporary struct value objects that were
     created during the handling of the event get deleted at the
     end.  */
  scoped_value_mark free_values;

  infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ());

  if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE)
    {
      /* We had an event in the inferior, but we are not interested in
         handling it at this level.  The lower layers have already
         done what needs to be done, if anything.

         One of the possible circumstances for this is when the
         inferior produces output for the console.  The inferior has
         not stopped, and we are ignoring the event.  Another possible
         circumstance is any event which the lower level knows will be
         reported multiple times without an intervening resume.  */
      prepare_to_wait (ecs);
      return;
    }
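
  /* Likewise a thread-exit event: any bookkeeping for the exiting
     thread was already done by the layers below, so at this level
     there is nothing to do but keep waiting for the next event.  */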
  if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
    {
      prepare_to_wait (ecs);
      return;
    }

  if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED
      && handle_no_resumed (ecs))
    return;

  /* Cache the last target/ptid/waitstatus.  */
  set_last_target_status (ecs->target, ecs->ptid, ecs->ws);

  /* Always clear state belonging to the previous time we stopped.  */
  stop_stack_dummy = STOP_NONE;

  if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      /* No unwaited-for children left.  IOW, all resumed children
         have exited.  */
      stop_print_frame = false;
      stop_waiting (ecs);
      return;
    }

  if (ecs->ws.kind () != TARGET_WAITKIND_EXITED
      && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED)
    {
      ecs->event_thread = find_thread_ptid (ecs->target, ecs->ptid);
      /* If it's a new thread, add it to the thread database.  */
      if (ecs->event_thread == NULL)
        ecs->event_thread = add_thread (ecs->target, ecs->ptid);

      /* Disable range stepping.  If the next step request could use a
         range, this will end up re-enabled then.  */
      ecs->event_thread->control.may_range_step = 0;
    }
  /* Dependent on valid ECS->EVENT_THREAD.  */
  adjust_pc_after_break (ecs->event_thread, ecs->ws);

  /* Dependent on the current PC value modified by adjust_pc_after_break.  */
  reinit_frame_cache ();

  breakpoint_retire_moribund ();

  /* First, distinguish signals caused by the debugger from signals
     that have to do with the program's own actions.  Note that
     breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
     on the operating system version.  Here we detect when a SIGILL or
     SIGEMT is really a breakpoint and change it to SIGTRAP.  We do
     something similar for SIGSEGV, since a SIGSEGV will be generated
     when we're trying to execute a breakpoint instruction on a
     non-executable stack.  This happens for call dummy breakpoints
     for architectures like SPARC that place call dummies on the
     stack.  */
  if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED
      && (ecs->ws.sig () == GDB_SIGNAL_ILL
          || ecs->ws.sig () == GDB_SIGNAL_SEGV
          || ecs->ws.sig () == GDB_SIGNAL_EMT))
    {
      struct regcache *regcache = get_thread_regcache (ecs->event_thread);

      if (breakpoint_inserted_here_p (regcache->aspace (),
                                      regcache_read_pc (regcache)))
        {
          infrun_debug_printf ("Treating signal as SIGTRAP");
          ecs->ws.set_stopped (GDB_SIGNAL_TRAP);
        }
    }
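
  /* Depending on the target and event kind, threads other than the
     event thread may have stopped executing too (e.g., on all-stop
     targets every thread stops); update the threads' executing state
     to match before acting on the event.  */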
  mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);

  switch (ecs->ws.kind ())
    {
    case TARGET_WAITKIND_LOADED:
      {
        context_switch (ecs);
        /* Ignore gracefully during startup of the inferior, as it might
           be the shell which has just loaded some objects, otherwise
           add the symbols for the newly loaded objects.  Also ignore at
           the beginning of an attach or remote session; we will query
           the full list of libraries once the connection is
           established.  */
        stop_kind stop_soon = get_inferior_stop_soon (ecs);
        if (stop_soon == NO_STOP_QUIETLY)
          {
            struct regcache *regcache;

            regcache = get_thread_regcache (ecs->event_thread);

            handle_solib_event ();

            ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
            ecs->event_thread->control.stop_bpstat
              = bpstat_stop_status_nowatch (regcache->aspace (),
                                            ecs->event_thread->stop_pc (),
                                            ecs->event_thread, ecs->ws);

            if (handle_stop_requested (ecs))
              return;

            if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
              {
                /* A catchpoint triggered.  */
                process_event_stop_test (ecs);
                return;
              }

            /* If requested, stop when the dynamic linker notifies
               gdb of events.  This allows the user to get control
               and place breakpoints in initializer routines for
               dynamically loaded objects (among other things).  */
            ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
            if (stop_on_solib_events)
              {
                /* Make sure we print "Stopped due to solib-event" in
                   normal_stop.  */
                stop_print_frame = true;

                stop_waiting (ecs);
                return;
              }
          }

        /* If we are skipping through a shell, or through shared library
           loading that we aren't interested in, resume the program.  If
           we're running the program normally, also resume.  */
        if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
          {
            /* Loading of shared libraries might have changed breakpoint
               addresses.  Make sure new breakpoints are inserted.  */
            if (stop_soon == NO_STOP_QUIETLY)
              insert_breakpoints ();
            resume (GDB_SIGNAL_0);
            prepare_to_wait (ecs);
            return;
          }

        /* But stop if we're attaching or setting up a remote
           connection.  */
        if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
            || stop_soon == STOP_QUIETLY_REMOTE)
          {
            infrun_debug_printf ("quietly stopped");
            stop_waiting (ecs);
            return;
          }

        internal_error (__FILE__, __LINE__,
                        _("unhandled stop_soon: %d"), (int) stop_soon);
      }

    case TARGET_WAITKIND_SPURIOUS:
      if (handle_stop_requested (ecs))
        return;
      context_switch (ecs);
      resume (GDB_SIGNAL_0);
      prepare_to_wait (ecs);
      return;
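
      /* A new thread appeared while we were resuming or stepping.
         Unless the user asked to stop, get back to what we were
         doing: re-resume the thread being stepped if there is one,
         otherwise keep the event thread going.  */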
    case TARGET_WAITKIND_THREAD_CREATED:
      if (handle_stop_requested (ecs))
        return;
      context_switch (ecs);
      if (!switch_back_to_stepped_thread (ecs))
        keep_going (ecs);
      return;

    case TARGET_WAITKIND_EXITED:
    case TARGET_WAITKIND_SIGNALLED:
      {
        /* Depending on the system, ecs->ptid may point to a thread or
           to a process.  On some targets, target_mourn_inferior may
           need to have access to the just-exited thread.  That is the
           case of GNU/Linux's "checkpoint" support, for example.
           Call the switch_to_xxx routine as appropriate.  */
        thread_info *thr = find_thread_ptid (ecs->target, ecs->ptid);
        if (thr != nullptr)
          switch_to_thread (thr);
        else
          {
            inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
            switch_to_inferior_no_thread (inf);
          }
      }
      handle_vfork_child_exec_or_exit (0);
      target_terminal::ours ();	/* Must do this before mourn anyway.  */

      /* Clearing any previous state of convenience variables.  */
      clear_exit_convenience_vars ();

      if (ecs->ws.kind () == TARGET_WAITKIND_EXITED)
        {
          /* Record the exit code in the convenience variable $_exitcode, so
             that the user can inspect this again later.  */
          set_internalvar_integer (lookup_internalvar ("_exitcode"),
                                   (LONGEST) ecs->ws.exit_status ());

          /* Also record this in the inferior itself.  */
          current_inferior ()->has_exit_code = 1;
          current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status ();

          /* Support the --return-child-result option.  */
          return_child_result_value = ecs->ws.exit_status ();

          gdb::observers::exited.notify (ecs->ws.exit_status ());
        }
      else
        {
          struct gdbarch *gdbarch = current_inferior ()->gdbarch;

          if (gdbarch_gdb_signal_to_target_p (gdbarch))
            {
              /* Set the value of the internal variable $_exitsignal,
                 which holds the signal uncaught by the inferior.  */
              set_internalvar_integer (lookup_internalvar ("_exitsignal"),
                                       gdbarch_gdb_signal_to_target (gdbarch,
                                                                     ecs->ws.sig ()));
            }
          else
            {
              /* We don't have access to the target's method used for
                 converting between signal numbers (GDB's internal
                 representation <-> target's representation).
                 Therefore, we cannot do a good job at displaying this
                 information to the user.  It's better to just warn
                 her about it (if infrun debugging is enabled), and
                 give up.  */
              infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
                                   "signal number.");
            }

          gdb::observers::signal_exited.notify (ecs->ws.sig ());
        }
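
      /* Flush any pending output before the inferior is gone, and let
         the target do its own cleanup for the dead inferior before we
         announce the stop.  */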
      gdb_flush (gdb_stdout);
      target_mourn_inferior (inferior_ptid);
      stop_print_frame = false;
      stop_waiting (ecs);
      return;
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      /* Check whether the inferior is displaced stepping.  */
      {
        struct regcache *regcache = get_thread_regcache (ecs->event_thread);
        struct gdbarch *gdbarch = regcache->arch ();
        inferior *parent_inf = find_inferior_ptid (ecs->target, ecs->ptid);

        /* If this is a fork (child gets its own address space copy)
           and some displaced step buffers were in use at the time of
           the fork, restore the displaced step buffer bytes in the
           child process.

           Architectures which support displaced stepping and fork
           events must supply an implementation of
           gdbarch_displaced_step_restore_all_in_ptid.  This is not
           enforced during gdbarch validation to support architectures
           which support displaced stepping but not forks.  */
        if (ecs->ws.kind () == TARGET_WAITKIND_FORKED
            && gdbarch_supports_displaced_stepping (gdbarch))
          gdbarch_displaced_step_restore_all_in_ptid
            (gdbarch, parent_inf, ecs->ws.child_ptid ());

        /* If displaced stepping is supported, and thread ecs->ptid is
           displaced stepping, clean up after the step-over.  */
        if (displaced_step_in_progress_thread (ecs->event_thread))
          {
            struct regcache *child_regcache;
            CORE_ADDR parent_pc;

            /* GDB has got TARGET_WAITKIND_FORKED or TARGET_WAITKIND_VFORKED,
               indicating that the displaced stepping of syscall instruction
               has been done.  Perform cleanup for parent process here.  Note
               that this operation also cleans up the child process for vfork,
               because their pages are shared.  */
            displaced_step_finish (ecs->event_thread, GDB_SIGNAL_TRAP);
            /* Start a new step-over in another thread if there's one
               that needs it.  */
            start_step_over ();

            /* Since the vfork/fork syscall instruction was executed in the scratchpad,
               the child's PC is also within the scratchpad.  Set the child's PC
               to the parent's PC value, which has already been fixed up.
               FIXME: we use the parent's aspace here, although we're touching
               the child, because the child hasn't been added to the inferior
               list yet at this point.  */

            child_regcache
              = get_thread_arch_aspace_regcache (parent_inf->process_target (),
                                                 ecs->ws.child_ptid (),
                                                 gdbarch,
                                                 parent_inf->aspace);
            /* Read PC value of parent process.  */
            parent_pc = regcache_read_pc (regcache);

            displaced_debug_printf ("write child pc from %s to %s",
                                    paddress (gdbarch,
                                              regcache_read_pc (child_regcache)),
                                    paddress (gdbarch, parent_pc));

            regcache_write_pc (child_regcache, parent_pc);
          }
      }
      context_switch (ecs);

      /* Immediately detach breakpoints from the child before there's
         any chance of letting the user delete breakpoints from the
         breakpoint lists.  If we don't do this early, it's easy to
         leave left over traps in the child, viz: "break foo; catch
         fork; c; <fork>; del; c; <child calls foo>".  We only follow
         the fork on the last `continue', and by that time the
         breakpoint at "foo" is long gone from the breakpoint table.
         If we vforked, then we don't need to unpatch here, since both
         parent and child are sharing the same memory pages; we'll
         need to unpatch at follow/detach time instead to be certain
         that new breakpoints added between catchpoint hit time and
         vfork follow are detached.  */
      if (ecs->ws.kind () != TARGET_WAITKIND_VFORKED)
        {
          /* This won't actually modify the breakpoint list, but will
             physically remove the breakpoints from the child.  */
          detach_breakpoints (ecs->ws.child_ptid ());
        }

      delete_just_stopped_threads_single_step_breakpoints ();

      /* In case the event is caught by a catchpoint, remember that
         the event is to be followed at the next resume of the thread,
         and not immediately.  */
      ecs->event_thread->pending_follow = ecs->ws;

      ecs->event_thread->set_stop_pc
        (regcache_read_pc (get_thread_regcache (ecs->event_thread)));

      ecs->event_thread->control.stop_bpstat
        = bpstat_stop_status_nowatch (get_current_regcache ()->aspace (),
                                      ecs->event_thread->stop_pc (),
                                      ecs->event_thread, ecs->ws);

      if (handle_stop_requested (ecs))
        return;

      /* If no catchpoint triggered for this, then keep going.  Note
         that we're interested in knowing the bpstat actually causes a
         stop, not just if it may explain the signal.  Software
         watchpoints, for example, always appear in the bpstat.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
        {
          bool follow_child
            = (follow_fork_mode_string == follow_fork_mode_child);

          ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

          process_stratum_target *targ
            = ecs->event_thread->inf->process_target ();

          bool should_resume = follow_fork ();

          /* Note that one of these may be an invalid pointer,
             depending on detach_fork.  */
          thread_info *parent = ecs->event_thread;
          thread_info *child = find_thread_ptid (targ, ecs->ws.child_ptid ());

          /* At this point, the parent is marked running, and the
             child is marked stopped.  */

          /* If not resuming the parent, mark it stopped.  */
          if (follow_child && !detach_fork && !non_stop && !sched_multi)
            parent->set_running (false);

          /* If resuming the child, mark it running.  */
          if (follow_child || (!detach_fork && (non_stop || sched_multi)))
            child->set_running (true);

          /* In non-stop mode, also resume the other branch.  */
          if (!detach_fork && (non_stop
                               || (sched_multi && target_is_non_stop_p ())))
            {
              if (follow_child)
                switch_to_thread (parent);
              else
                switch_to_thread (child);

              ecs->event_thread = inferior_thread ();
              ecs->ptid = inferior_ptid;
              keep_going (ecs);
            }
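
          /* Switch to the branch we decided to follow, so that the
             code below stops or resumes that thread.  */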
          if (follow_child)
            switch_to_thread (child);
          else
            switch_to_thread (parent);

          ecs->event_thread = inferior_thread ();
          ecs->ptid = inferior_ptid;

          if (should_resume)
            {
              /* Never call switch_back_to_stepped_thread if we are waiting for
                 vfork-done (waiting for an external vfork child to exec or
                 exit).  We will resume only the vforking thread for the purpose
                 of collecting the vfork-done event, and we will restart any
                 step once the critical shared address space window is done.  */
              if ((!follow_child
                   && detach_fork
                   && parent->inf->thread_waiting_for_vfork_done != nullptr)
                  || !switch_back_to_stepped_thread (ecs))
                keep_going (ecs);
            }
          else
            stop_waiting (ecs);
          return;
        }
      process_event_stop_test (ecs);
      return;
    case TARGET_WAITKIND_VFORK_DONE:
      /* Done with the shared memory region.  Re-insert breakpoints in
         the parent, and keep going.  */

      context_switch (ecs);

      handle_vfork_done (ecs->event_thread);
      gdb_assert (inferior_thread () == ecs->event_thread);

      if (handle_stop_requested (ecs))
        return;

      if (!switch_back_to_stepped_thread (ecs))
        {
          gdb_assert (inferior_thread () == ecs->event_thread);
          /* This also takes care of reinserting breakpoints in the
             previously locked inferior.  */
          keep_going (ecs);
        }
      return;

    case TARGET_WAITKIND_EXECD:

      /* Note we can't read registers yet (the stop_pc), because we
         don't yet know the inferior's post-exec architecture.
         'stop_pc' is explicitly read below instead.  */
      switch_to_thread_no_regs (ecs->event_thread);

      /* Do whatever is necessary to the parent branch of the vfork.  */
      handle_vfork_child_exec_or_exit (1);

      /* This causes the eventpoints and symbol table to be reset.
         Must do this now, before trying to determine whether to
         stop.  */
      follow_exec (inferior_ptid, ecs->ws.execd_pathname ());

      /* In follow_exec we may have deleted the original thread and
         created a new one.  Make sure that the event thread is the
         execd thread for that case (this is a nop otherwise).  */
      ecs->event_thread = inferior_thread ();

      ecs->event_thread->set_stop_pc
        (regcache_read_pc (get_thread_regcache (ecs->event_thread)));

      ecs->event_thread->control.stop_bpstat
        = bpstat_stop_status_nowatch (get_current_regcache ()->aspace (),
                                      ecs->event_thread->stop_pc (),
                                      ecs->event_thread, ecs->ws);

      if (handle_stop_requested (ecs))
        return;

      /* If no catchpoint triggered for this, then keep going.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
        {
          ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
          keep_going (ecs);
          return;
        }
      process_event_stop_test (ecs);
      return;

      /* Be careful not to try to gather much state about a thread
         that's in a syscall.  It's frequently a losing proposition.  */
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      /* Getting the current syscall number.  */
      if (handle_syscall_event (ecs) == 0)
        process_event_stop_test (ecs);
      return;

      /* Before examining the threads further, step this thread to
         get it entirely out of the syscall.  (We get notice of the
         event when the thread is just on the verge of exiting a
         syscall.  Stepping one instruction seems to get it back
         into user code.)  */
    case TARGET_WAITKIND_SYSCALL_RETURN:
      if (handle_syscall_event (ecs) == 0)
        process_event_stop_test (ecs);
      return;
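
      /* The thread stopped with a signal; this is the common case
         (breakpoints and single-steps report SIGTRAP), and
         handle_signal_stop does the heavy lifting.  */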
    case TARGET_WAITKIND_STOPPED:
      handle_signal_stop (ecs);
      return;

    case TARGET_WAITKIND_NO_HISTORY:
      /* Reverse execution: target ran out of history info.  */

      /* Switch to the stopped thread.  */
      context_switch (ecs);
      infrun_debug_printf ("stopped");

      delete_just_stopped_threads_single_step_breakpoints ();
      ecs->event_thread->set_stop_pc
        (regcache_read_pc (get_thread_regcache (inferior_thread ())));

      if (handle_stop_requested (ecs))
        return;

      gdb::observers::no_history.notify ();
      stop_waiting (ecs);
      return;
    }
}
/* Restart threads back to what they were trying to do back when we
   paused them (because of an in-line step-over or vfork, for example).
   The EVENT_THREAD thread is ignored (not restarted).

   If INF is non-nullptr, only resume threads from INF.  */

static void
restart_threads (struct thread_info *event_thread, inferior *inf)
{
  INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
                                 event_thread->ptid.to_string ().c_str (),
                                 inf != nullptr ? inf->num : -1);

  /* In case the instruction just stepped spawned a new thread.  */
  update_thread_list ();

  for (thread_info *tp : all_non_exited_threads ())
    {
      if (inf != nullptr && tp->inf != inf)
        continue;

      if (tp->inf->detaching)
        {
          infrun_debug_printf ("restart threads: [%s] inferior detaching",
                               tp->ptid.to_string ().c_str ());
          continue;
        }

      switch_to_thread_no_regs (tp);

      if (tp == event_thread)
        {
          infrun_debug_printf ("restart threads: [%s] is event thread",
                               tp->ptid.to_string ().c_str ());
          continue;
        }

      if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
        {
          infrun_debug_printf ("restart threads: [%s] not meant to be running",
                               tp->ptid.to_string ().c_str ());
          continue;
        }

      if (tp->resumed ())
        {
          infrun_debug_printf ("restart threads: [%s] resumed",
                               tp->ptid.to_string ().c_str ());
          gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
          continue;
        }

      if (thread_is_in_step_over_chain (tp))
        {
          infrun_debug_printf ("restart threads: [%s] needs step-over",
                               tp->ptid.to_string ().c_str ());
          gdb_assert (!tp->resumed ());
          continue;
        }

      if (tp->has_pending_waitstatus ())
        {
          infrun_debug_printf ("restart threads: [%s] has pending status",
                               tp->ptid.to_string ().c_str ());
          tp->set_resumed (true);
          continue;
        }

      gdb_assert (!tp->stop_requested);

      /* If some thread needs to start a step-over at this point, it
         should still be in the step-over queue, and thus skipped
         above.  */
      if (thread_still_needs_step_over (tp))
        {
          internal_error (__FILE__, __LINE__,
                          "thread [%s] needs a step-over, but not in "
                          "step-over queue\n",
                          tp->ptid.to_string ().c_str ());
        }

      if (currently_stepping (tp))
        {
          infrun_debug_printf ("restart threads: [%s] was stepping",
                               tp->ptid.to_string ().c_str ());
          keep_going_stepped_thread (tp);
        }
      else
        {
          struct execution_control_state ecss;
          struct execution_control_state *ecs = &ecss;

          infrun_debug_printf ("restart threads: [%s] continuing",
                               tp->ptid.to_string ().c_str ());
          reset_ecs (ecs, tp);
          switch_to_thread (tp);
          keep_going_pass_signal (ecs);
        }
    }
}
/* Callback for iterate_over_threads.  Find a resumed thread that has
   a pending waitstatus.  */

static int
resumed_thread_with_pending_status (struct thread_info *tp,
                                    void *arg)
{
  return tp->resumed () && tp->has_pending_waitstatus ();
}
/* Called when we get an event that may finish an in-line or
   out-of-line (displaced stepping) step-over started previously.
   Return true if the event is processed and we should go back to the
   event loop; false if the caller should continue processing the
   event.  */

static int
finish_step_over (struct execution_control_state *ecs)
{
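  /* If the event thread was doing an out-of-line (displaced) step,
     complete it first; this restores the displaced instruction and
     applies any architecture-specific fixup to the thread's state.  */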
  displaced_step_finish (ecs->event_thread, ecs->event_thread->stop_signal ());

  bool had_step_over_info = step_over_info_valid_p ();

  if (had_step_over_info)
    {
      /* If we're stepping over a breakpoint with all threads locked,
         then only the thread that was stepped should be reporting
         back an event.  */
      gdb_assert (ecs->event_thread->control.trap_expected);

      clear_step_over_info ();
    }

  if (!target_is_non_stop_p ())
    return 0;

  /* Start a new step-over in another thread if there's one that
     needs it.  */
  start_step_over ();

  /* If we were stepping over a breakpoint before, and haven't started
     a new in-line step-over sequence, then restart all other threads
     (except the event thread).  We can't do this in all-stop, as then
     e.g., we wouldn't be able to issue any other remote packet until
     these other threads stop.  */
  if (had_step_over_info && !step_over_info_valid_p ())
    {
      struct thread_info *pending;

      /* If we only have threads with pending statuses, the restart
         below won't restart any thread and so nothing re-inserts the
         breakpoint we just stepped over.  But we need it inserted
         when we later process the pending events, otherwise if
         another thread has a pending event for this breakpoint too,
         we'd discard its event (because the breakpoint that
         originally caused the event was no longer inserted).  */
      context_switch (ecs);
      insert_breakpoints ();

      restart_threads (ecs->event_thread);

      /* If we have events pending, go through handle_inferior_event
         again, picking up a pending event at random.  This avoids
         thread starvation.  */

      /* But not if we just stepped over a watchpoint in order to let
         the instruction execute so we can evaluate its expression.
         The set of watchpoints that triggered is recorded in the
         breakpoint objects themselves (see bp->watchpoint_triggered).
         If we processed another event first, that other event could
         clobber this info.  */
      if (ecs->event_thread->stepping_over_watchpoint)
        return 0;

      pending = iterate_over_threads (resumed_thread_with_pending_status,
                                      NULL);
      if (pending != NULL)
        {
          struct thread_info *tp = ecs->event_thread;
          struct regcache *regcache;

          infrun_debug_printf ("found resumed threads with "
                               "pending events, saving status");

          gdb_assert (pending != tp);

          /* Record the event thread's event for later.  */
          save_waitstatus (tp, ecs->ws);
          /* This was cleared early, by handle_inferior_event.  Set it
             so this pending event is considered by
             do_target_wait.  */
          tp->set_resumed (true);

          gdb_assert (!tp->executing ());

          regcache = get_thread_regcache (tp);
          tp->set_stop_pc (regcache_read_pc (regcache));

          infrun_debug_printf ("saved stop_pc=%s for %s "
                               "(currently_stepping=%d)",
                               paddress (target_gdbarch (), tp->stop_pc ()),
                               tp->ptid.to_string ().c_str (),
                               currently_stepping (tp));

          /* This in-line step-over finished; clear this so we won't
             start a new one.  This is what handle_signal_stop would
             do, if we returned false.  */
          tp->stepping_over_breakpoint = 0;

          /* Wake up the event loop again.  */
          mark_async_event_handler (infrun_async_inferior_event_token);

          prepare_to_wait (ecs);
          return 1;
        }
    }

  return 0;
}
/* Come here when the program has stopped with a signal.  */

static void
handle_signal_stop (struct execution_control_state *ecs)
{
  struct frame_info *frame;
  struct gdbarch *gdbarch;
  int stopped_by_watchpoint;
  enum stop_kind stop_soon;
  int random_signal;

  gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED);

  ecs->event_thread->set_stop_signal (ecs->ws.sig ());

  /* Do we need to clean up the state of a thread that has
     completed a displaced single-step?  (Doing so usually affects
     the PC, so do it here, before we set stop_pc.)  */
  if (finish_step_over (ecs))
    return;

  /* If we either finished a single-step or hit a breakpoint, but
     the user wanted this thread to be stopped, pretend we got a
     SIG0 (generic unsignaled stop).  */
  if (ecs->event_thread->stop_requested
      && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
    ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

  ecs->event_thread->set_stop_pc
    (regcache_read_pc (get_thread_regcache (ecs->event_thread)));

  context_switch (ecs);

  if (deprecated_context_hook)
    deprecated_context_hook (ecs->event_thread->global_num);

  if (debug_infrun)
    {
      struct regcache *regcache = get_thread_regcache (ecs->event_thread);
      struct gdbarch *reg_gdbarch = regcache->arch ();

      infrun_debug_printf
        ("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
      if (target_stopped_by_watchpoint ())
        {
          CORE_ADDR addr;

          infrun_debug_printf ("stopped by watchpoint");

          if (target_stopped_data_address (current_inferior ()->top_target (),
                                           &addr))
            infrun_debug_printf ("stopped data address=%s",
                                 paddress (reg_gdbarch, addr));
          else
            infrun_debug_printf ("(no data address available)");
        }
    }
  /* This originates from start_remote(), start_inferior() and the
     shared library hook functions.  */
  stop_soon = get_inferior_stop_soon (ecs);
  if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
    {
      infrun_debug_printf ("quietly stopped");
      stop_print_frame = true;
      stop_waiting (ecs);
      return;
    }

  /* This originates from attach_command().  We need to overwrite
     the stop_signal here, because some kernels don't ignore a
     SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
     See more comments in inferior.h.  On the other hand, if we
     get a non-SIGSTOP, report it to the user - assume the backend
     will handle the SIGSTOP if it should show up later.

     Also consider that the attach is complete when we see a
     SIGTRAP.  Some systems (e.g. Windows), and stubs supporting
     target extended-remote report it instead of a SIGSTOP
     (e.g. gdbserver).  We already rely on SIGTRAP being our
     signal, so this is no exception.

     Also consider that the attach is complete when we see a
     GDB_SIGNAL_0.  In non-stop mode, GDB will explicitly tell
     the target to stop all threads of the inferior, in case the
     low level attach operation doesn't stop them implicitly.  If
     they weren't stopped implicitly, then the stub will report a
     GDB_SIGNAL_0, meaning: stopped for no particular reason
     other than GDB's request.  */
  if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
      && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
          || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
          || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
    {
      stop_print_frame = true;
      stop_waiting (ecs);
      ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
      return;
    }
  /* At this point, get hold of the now-current thread's frame.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  /* Pull the single step breakpoints out of the target.  */
  if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache;
      CORE_ADDR pc;

      regcache = get_thread_regcache (ecs->event_thread);
      const address_space *aspace = regcache->aspace ();

      pc = regcache_read_pc (regcache);

      /* However, before doing so, if this single-step breakpoint was
         actually for another thread, set this thread up for moving
         past it.  */
      if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
                                                   aspace, pc))
        {
          if (single_step_breakpoint_inserted_here_p (aspace, pc))
            {
              infrun_debug_printf ("[%s] hit another thread's single-step "
                                   "breakpoint",
                                   ecs->ptid.to_string ().c_str ());
              ecs->hit_singlestep_breakpoint = 1;
            }
        }
      else
        {
          infrun_debug_printf ("[%s] hit its single-step breakpoint",
                               ecs->ptid.to_string ().c_str ());
        }
    }
  delete_just_stopped_threads_single_step_breakpoints ();

  if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && ecs->event_thread->stepping_over_watchpoint)
    stopped_by_watchpoint = 0;
  else
    stopped_by_watchpoint = watchpoints_triggered (ecs->ws);

  /* If necessary, step over this watchpoint.  We'll be back to display
     it in a moment.  */
  if (stopped_by_watchpoint
      && (target_have_steppable_watchpoint ()
          || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
    {
      /* At this point, we are stopped at an instruction which has
         attempted to write to a piece of memory under control of
         a watchpoint.  The instruction hasn't actually executed
         yet.  If we were to evaluate the watchpoint expression
         now, we would get the old value, and therefore no change
         would seem to have occurred.

         In order to make watchpoints work `right', we really need
         to complete the memory write, and then evaluate the
         watchpoint expression.  We do this by single-stepping the
         target.

         It may not be necessary to disable the watchpoint to step over
         it.  For example, the PA can (with some kernel cooperation)
         single step over a watchpoint without disabling the watchpoint.

         It is far more common to need to disable a watchpoint to step
         the inferior over it.  If we have non-steppable watchpoints,
         we must disable the current watchpoint; it's simplest to
         disable all watchpoints.

         Any breakpoint at PC must also be stepped over -- if there's
         one, it will have already triggered before the watchpoint
         triggered, and we either already reported it to the user, or
         it didn't cause a stop and we called keep_going.  In either
         case, if there was a breakpoint at PC, we must be trying to
         step past it.  */
      ecs->event_thread->stepping_over_watchpoint = 1;
      keep_going (ecs);
      return;
    }
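
  /* The thread is really stopping here; reset the per-stop state
     before running the checks below that decide what this stop
     means.  */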
  ecs->event_thread->stepping_over_breakpoint = 0;
  ecs->event_thread->stepping_over_watchpoint = 0;
  bpstat_clear (&ecs->event_thread->control.stop_bpstat);
  ecs->event_thread->control.stop_step = 0;
  stop_print_frame = true;
  stopped_by_random_signal = 0;
  bpstat *stop_chain = nullptr;

  /* Hide inlined functions starting here, unless we just performed stepi or
     nexti.  After stepi and nexti, always show the innermost frame (not any
     inline function call sites).  */
  if (ecs->event_thread->control.step_range_end != 1)
    {
      const address_space *aspace
        = get_thread_regcache (ecs->event_thread)->aspace ();

      /* skip_inline_frames is expensive, so we avoid it if we can
         determine that the address is one where functions cannot have
         been inlined.  This improves performance with inferiors that
         load a lot of shared libraries, because the solib event
         breakpoint is defined as the address of a function (i.e. not
         inline).  Note that we have to check the previous PC as well
         as the current one to catch cases when we have just
         single-stepped off a breakpoint prior to reinstating it.
         Note that we're assuming that the code we single-step to is
         not inline, but that's not definitive: there's nothing
         preventing the event breakpoint function from containing
         inlined code, and the single-step ending up there.  If the
         user had set a breakpoint on that inlined code, the missing
         skip_inline_frames call would break things.  Fortunately
         that's an extremely unlikely scenario.  */
      if (!pc_at_non_inline_function (aspace,
                                      ecs->event_thread->stop_pc (),
                                      ecs->ws)
          && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
               && ecs->event_thread->control.trap_expected
               && pc_at_non_inline_function (aspace,
                                             ecs->event_thread->prev_pc,
                                             ecs->ws)))
        {
          stop_chain = build_bpstat_chain (aspace,
                                           ecs->event_thread->stop_pc (),
                                           ecs->ws);
          skip_inline_frames (ecs->event_thread, stop_chain);

          /* Re-fetch current thread's frame in case that invalidated
             the frame cache.  */
          frame = get_current_frame ();
          gdbarch = get_frame_arch (frame);
        }
    }
  if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
      && ecs->event_thread->control.trap_expected
      && gdbarch_single_step_through_delay_p (gdbarch)
      && currently_stepping (ecs->event_thread))
    {
      /* We're trying to step off a breakpoint.  Turns out that we're
         also on an instruction that needs to be stepped multiple
         times before it has fully executed.  E.g., architectures
         with a delay slot.  It needs to be stepped twice, once for
         the instruction and once for the delay slot.  */
      int step_through_delay
        = gdbarch_single_step_through_delay (gdbarch, frame);

      if (step_through_delay)
        infrun_debug_printf ("step through delay");

      if (ecs->event_thread->control.step_range_end == 0
          && step_through_delay)
        {
          /* The user issued a continue when stopped at a breakpoint.
             Set up for another trap and get out of here.  */
          ecs->event_thread->stepping_over_breakpoint = 1;
          keep_going (ecs);
          return;
        }
      else if (step_through_delay)
        {
          /* The user issued a step when stopped at a breakpoint.
             Maybe we should stop, maybe we should not - the delay
             slot *might* correspond to a line of source.  In any
             case, don't decide that here, just set
             ecs->stepping_over_breakpoint, making sure we
             single-step again before breakpoints are re-inserted.  */
          ecs->event_thread->stepping_over_breakpoint = 1;
        }
    }
  /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
     handles this event.  */
  ecs->event_thread->control.stop_bpstat
    = bpstat_stop_status (get_current_regcache ()->aspace (),
                          ecs->event_thread->stop_pc (),
                          ecs->event_thread, ecs->ws, stop_chain);

  /* The following is needed in case a breakpoint condition called a
     function.  */
  stop_print_frame = true;

  /* This is where we handle "moribund" watchpoints.  Unlike
     software breakpoint traps, hardware watchpoint traps are
     always distinguishable from random traps.  If no high-level
     watchpoint is associated with the reported stop data address
     anymore, then the bpstat does not explain the signal ---
     simply make sure to ignore it if `stopped_by_watchpoint' is
     set.  */
  if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
      && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
                                  GDB_SIGNAL_TRAP)
      && stopped_by_watchpoint)
    {
      infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
                           "ignoring");
    }

  /* NOTE: cagney/2003-03-29: These checks for a random signal
     at one stage in the past included checks for an inferior
     function call's call dummy's return breakpoint.  The original
     comment, that went with the test, read:

     ``End of a stack dummy.  Some systems (e.g. Sony news) give
     another signal besides SIGTRAP, so check here as well as
     above.''

     If someone ever tries to get call dummys on a
     non-executable stack to work (where the target would stop
     with something like a SIGSEGV), then those tests might need
     to be re-instated.  Given, however, that the tests were only
     enabled when momentary breakpoints were not being used, I
     suspect that it won't be the case.

     NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
     be necessary for call dummies on a non-executable stack on
     SPARC.  */

  /* See if the breakpoints module can explain the signal.  */
  random_signal
    = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
                               ecs->event_thread->stop_signal ());
  /* Maybe this was a trap for a software breakpoint that has since
     been removed.  */
  if (random_signal && target_stopped_by_sw_breakpoint ())
    {
      if (gdbarch_program_breakpoint_here_p (gdbarch,
                                             ecs->event_thread->stop_pc ()))
        {
          struct regcache *regcache;
          int decr_pc;

          /* Re-adjust PC to what the program would see if GDB was not
             debugging it.  */
          regcache = get_thread_regcache (ecs->event_thread);
          decr_pc = gdbarch_decr_pc_after_break (gdbarch);
          if (decr_pc != 0)
            {
              gdb::optional<scoped_restore_tmpl<int>>
                restore_operation_disable;

              if (record_full_is_used ())
                restore_operation_disable.emplace
                  (record_full_gdb_operation_disable_set ());

              regcache_write_pc (regcache,
                                 ecs->event_thread->stop_pc () + decr_pc);
            }
        }
      else
        {
          /* A delayed software breakpoint event.  Ignore the trap.  */
          infrun_debug_printf ("delayed software breakpoint trap, ignoring");
          random_signal = 0;
        }
    }

  /* Maybe this was a trap for a hardware breakpoint/watchpoint that
     has since been removed.  */
  if (random_signal && target_stopped_by_hw_breakpoint ())
    {
      /* A delayed hardware breakpoint event.  Ignore the trap.  */
      infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
                           "trap, ignoring");
      random_signal = 0;
    }
  /* If not, perhaps stepping/nexting can.  */
  if (random_signal)
    random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
                      && currently_stepping (ecs->event_thread));

  /* Perhaps the thread hit a single-step breakpoint of _another_
     thread.  Single-step breakpoints are transparent to the
     breakpoints module.  */
  if (random_signal)
    random_signal = !ecs->hit_singlestep_breakpoint;

  /* No?  Perhaps we got a moribund watchpoint.  */
  if (random_signal)
    random_signal = !stopped_by_watchpoint;

  /* Always stop if the user explicitly requested this thread to
     remain stopped.  */
  if (ecs->event_thread->stop_requested)
    {
      random_signal = 1;
      infrun_debug_printf ("user-requested stop");
    }
  /* For the program's own signals, act according to
     the signal handling tables.  */

  if (random_signal)
    {
      /* Signal not for debugging purposes.  */
      enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();

      infrun_debug_printf ("random signal (%s)",
                           gdb_signal_to_symbol_string (stop_signal));

      stopped_by_random_signal = 1;

      /* Always stop on signals if we're either just gaining control
         of the program, or the user explicitly requested this thread
         to remain stopped.  */
      if (stop_soon != NO_STOP_QUIETLY
          || ecs->event_thread->stop_requested
          || signal_stop_state (ecs->event_thread->stop_signal ()))
        {
          stop_waiting (ecs);
          return;
        }

      /* Notify observers the signal has "handle print" set.  Note we
         returned early above if stopping; normal_stop handles the
         printing in that case.  */
      if (signal_print[ecs->event_thread->stop_signal ()])
        {
          /* The signal table tells us to print about this signal.  */
          target_terminal::ours_for_output ();
          gdb::observers::signal_received.notify (ecs->event_thread->stop_signal ());
          target_terminal::inferior ();
        }

      /* Clear the signal if it should not be passed.  */
      if (signal_program[ecs->event_thread->stop_signal ()] == 0)
        ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

      if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
          && ecs->event_thread->control.trap_expected
          && ecs->event_thread->control.step_resume_breakpoint == NULL)
        {
          /* We were just starting a new sequence, attempting to
             single-step off of a breakpoint and expecting a SIGTRAP.
             Instead this signal arrives.  This signal will take us out
             of the stepping range so GDB needs to remember to, when
             the signal handler returns, resume stepping off that
             breakpoint.  */
          /* To simplify things, "continue" is forced to use the same
             code paths as single-step - set a breakpoint at the
             signal return address and then, once hit, step off that
             breakpoint.  */
          infrun_debug_printf ("signal arrived while stepping over breakpoint");

          insert_hp_step_resume_breakpoint_at_frame (frame);
          ecs->event_thread->step_after_step_resume_breakpoint = 1;
          /* Reset trap_expected to ensure breakpoints are re-inserted.  */
          ecs->event_thread->control.trap_expected = 0;

          /* If we were nexting/stepping some other thread, switch to
             it, so that we don't continue it, losing control.  */
          if (!switch_back_to_stepped_thread (ecs))
            keep_going (ecs);
          return;
        }

      if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
          && (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
                                       ecs->event_thread)
              || ecs->event_thread->control.step_range_end == 1)
          && frame_id_eq (get_stack_frame_id (frame),
                          ecs->event_thread->control.step_stack_frame_id)
          && ecs->event_thread->control.step_resume_breakpoint == NULL)
        {
          /* The inferior is about to take a signal that will take it
             out of the single step range.  Set a breakpoint at the
             current PC (which is presumably where the signal handler
             will eventually return) and then allow the inferior to
             run free.

             Note that this is only needed for a signal delivered
             while in the single-step range.  Nested signals aren't a
             problem as they eventually all return.  */
          infrun_debug_printf ("signal may take us out of single-step range");

          clear_step_over_info ();
          insert_hp_step_resume_breakpoint_at_frame (frame);
          ecs->event_thread->step_after_step_resume_breakpoint = 1;
          /* Reset trap_expected to ensure breakpoints are re-inserted.  */
          ecs->event_thread->control.trap_expected = 0;
          keep_going (ecs);
          return;
        }

      /* Note: step_resume_breakpoint may be non-NULL.  This occurs
         when either there's a nested signal, or when there's a
         pending signal enabled just as the signal handler returns
         (leaving the inferior at the step-resume-breakpoint without
         actually executing it).  Either way continue until the
         breakpoint is really hit.  */

      if (!switch_back_to_stepped_thread (ecs))
        {
          infrun_debug_printf ("random signal, keep going");

          keep_going (ecs);
        }
      return;
    }

  process_event_stop_test (ecs);
}
/* Come here when we've got some debug event / signal we can explain
   (IOW, not a random signal), and test whether it should cause a
   stop, or whether we should resume the inferior (transparently).
   E.g., could be a breakpoint whose condition evaluates false; we
   could be still stepping within the line; etc.  */

static void
process_event_stop_test (struct execution_control_state *ecs)
{
  struct symtab_and_line stop_pc_sal;
  struct frame_info *frame;
  struct gdbarch *gdbarch;
  CORE_ADDR jmp_buf_pc;
  struct bpstat_what what;

  /* Handle cases caused by hitting a breakpoint.  */

  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  what = bpstat_what (ecs->event_thread->control.stop_bpstat);
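
  /* If the stop was at a call-dummy breakpoint (i.e., an inferior
     function call completed or was interrupted), record how the dummy
     frame should be dealt with when we eventually stop.  */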
  if (what.call_dummy)
    {
      stop_stack_dummy = what.call_dummy;
    }

  /* A few breakpoint types have callbacks associated (e.g.,
     bp_jit_event).  Run them now.  */
  bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);

  /* If we hit an internal event that triggers symbol changes, the
     current frame will be invalidated within bpstat_what (e.g., if we
     hit an internal solib event).  Re-fetch it.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  switch (what.main_action)
    {
    case BPSTAT_WHAT_SET_LONGJMP_RESUME:
      /* If we hit the breakpoint at longjmp while stepping, we
         install a momentary breakpoint at the target of the
         jmp_buf.  */
      infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");

      ecs->event_thread->stepping_over_breakpoint = 1;

      if (what.is_longjmp)
        {
          struct value *arg_value;

          /* If we set the longjmp breakpoint via a SystemTap probe,
             then use it to extract the arguments.  The destination PC
             is the third argument to the probe.  */
          arg_value = probe_safe_evaluate_at_pc (frame, 2);
          if (arg_value)
            {
              jmp_buf_pc = value_as_address (arg_value);
              jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
            }
          else if (!gdbarch_get_longjmp_target_p (gdbarch)
                   || !gdbarch_get_longjmp_target (gdbarch,
                                                   frame, &jmp_buf_pc))
            {
              infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
                                   "(!gdbarch_get_longjmp_target)");
              keep_going (ecs);
              return;
            }

          /* Insert a breakpoint at resume address.  */
          insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
        }
      else
        check_exception_resume (ecs, frame);
      keep_going (ecs);
      return;

    case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
      {
        struct frame_info *init_frame;

        /* There are several cases to consider.

           1. The initiating frame no longer exists.  In this case we
           must stop, because the exception or longjmp has gone too
           far.

           2. The initiating frame exists, and is the same as the
           current frame.  We stop, because the exception or longjmp
           has been caught.

           3. The initiating frame exists and is different from the
           current frame.  This means the exception or longjmp has
           been caught beneath the initiating frame, so keep going.

           4. longjmp breakpoint has been placed just to protect
           against stale dummy frames and user is not interested in
           stopping around longjmps.  */

        infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");

        gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
                    != NULL);
        delete_exception_resume_breakpoint (ecs->event_thread);

        if (what.is_longjmp)
          {
            check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);

            if (!frame_id_p (ecs->event_thread->initiating_frame))
              {
                /* Case 4.  */
                keep_going (ecs);
                return;
              }
          }

        init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);

        if (init_frame)
          {
            struct frame_id current_id
              = get_frame_id (get_current_frame ());
            if (frame_id_eq (current_id,
                             ecs->event_thread->initiating_frame))
              {
                /* Case 2.  Fall through.  */
              }
            else
              {
                /* Case 3.  */
                keep_going (ecs);
                return;
              }
          }

        /* For Cases 1 and 2, remove the step-resume breakpoint, if it
           exists.  */
        delete_step_resume_breakpoint (ecs->event_thread);

        end_stepping_range (ecs);
      }
      return;
    case BPSTAT_WHAT_SINGLE:
      infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
      ecs->event_thread->stepping_over_breakpoint = 1;
      /* Still need to check other stuff, at least the case where we
         are stepping and step out of the right range.  */
      break;

    case BPSTAT_WHAT_STEP_RESUME:
      infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");

      delete_step_resume_breakpoint (ecs->event_thread);
      if (ecs->event_thread->control.proceed_to_finish
          && execution_direction == EXEC_REVERSE)
        {
          struct thread_info *tp = ecs->event_thread;

          /* We are finishing a function in reverse, and just hit the
             step-resume breakpoint at the start address of the
             function, and we're almost there -- just need to back up
             by one more single-step, which should take us back to the
             function call.  */
          tp->control.step_range_start = tp->control.step_range_end = 1;
          keep_going (ecs);
          return;
        }
      fill_in_stop_func (gdbarch, ecs);
      if (ecs->event_thread->stop_pc () == ecs->stop_func_start
          && execution_direction == EXEC_REVERSE)
        {
          /* We are stepping over a function call in reverse, and just
             hit the step-resume breakpoint at the start address of
             the function.  Go back to single-stepping, which should
             take us back to the function call.  */
          ecs->event_thread->stepping_over_breakpoint = 1;
          keep_going (ecs);
          return;
        }
      break;

    case BPSTAT_WHAT_STOP_NOISY:
      infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
      stop_print_frame = true;

      /* Assume the thread stopped for a breakpoint.  We'll still check
         whether a/the breakpoint is there when the thread is next
         resumed.  */
      ecs->event_thread->stepping_over_breakpoint = 1;

      stop_waiting (ecs);
      return;

    case BPSTAT_WHAT_STOP_SILENT:
      infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
      stop_print_frame = false;

      /* Assume the thread stopped for a breakpoint.  We'll still check
         whether a/the breakpoint is there when the thread is next
         resumed.  */
      ecs->event_thread->stepping_over_breakpoint = 1;
      stop_waiting (ecs);
      return;

    case BPSTAT_WHAT_HP_STEP_RESUME:
      infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");

      delete_step_resume_breakpoint (ecs->event_thread);
      if (ecs->event_thread->step_after_step_resume_breakpoint)
        {
          /* Back when the step-resume breakpoint was inserted, we
             were trying to single-step off a breakpoint.  Go back to
             doing that.  */
          ecs->event_thread->step_after_step_resume_breakpoint = 0;
          ecs->event_thread->stepping_over_breakpoint = 1;
          keep_going (ecs);
          return;
        }
      break;

    case BPSTAT_WHAT_KEEP_CHECKING:
      break;
    }

  /* If we stepped a permanent breakpoint and we had a high priority
     step-resume breakpoint for the address we stepped, but we didn't
     hit it, then we must have stepped into the signal handler.  The
     step-resume was only necessary to catch the case of _not_
     stepping into the handler, so delete it, and fall through to
     checking whether the step finished.  */
  if (ecs->event_thread->stepped_breakpoint)
    {
      struct breakpoint *sr_bp
        = ecs->event_thread->control.step_resume_breakpoint;

      if (sr_bp != NULL
          && sr_bp->loc->permanent
          && sr_bp->type == bp_hp_step_resume
          && sr_bp->loc->address == ecs->event_thread->prev_pc)
        {
          infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
          delete_step_resume_breakpoint (ecs->event_thread);
          ecs->event_thread->step_after_step_resume_breakpoint = 0;
        }
    }

  /* We come here if we hit a breakpoint but should not stop for it.
     Possibly we also were stepping and should stop for that.  So fall
     through and test for stepping.  But, if not stepping, do not
     stop.  */

  /* In all-stop mode, if we're currently stepping but have stopped in
     some other thread, we need to switch back to the stepped thread.  */
  if (switch_back_to_stepped_thread (ecs))
    return;

  if (ecs->event_thread->control.step_resume_breakpoint)
    {
      infrun_debug_printf ("step-resume breakpoint is inserted");

      /* Having a step-resume breakpoint overrides anything
         else having to do with stepping commands until
         that breakpoint is reached.  */
      keep_going (ecs);
      return;
    }

  if (ecs->event_thread->control.step_range_end == 0)
    {
      infrun_debug_printf ("no stepping, continue");
      /* Likewise if we aren't even stepping.  */
      keep_going (ecs);
      return;
    }

  /* Re-fetch current thread's frame in case the code above caused
     the frame cache to be re-initialized, making our FRAME variable
     a dangling pointer.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);
  fill_in_stop_func (gdbarch, ecs);

  /* If stepping through a line, keep going if still within it.

     Note that step_range_end is the address of the first instruction
     beyond the step range, and NOT the address of the last instruction
     within it!

     Note also that during reverse execution, we may be stepping
     through a function epilogue and therefore must detect when
     the current-frame changes in the middle of a line.  */
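
  /* Worked example (made-up addresses): if the line being stepped
     compiled to instructions at 0x1000, 0x1004 and 0x1008, then
     step_range_start is 0x1000 and step_range_end is 0x100c, so a
     stop_pc of 0x1008 is still inside the range while 0x100c is
     not.  */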
  if (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
                               ecs->event_thread)
      && (execution_direction != EXEC_REVERSE
          || frame_id_eq (get_frame_id (frame),
                          ecs->event_thread->control.step_frame_id)))
    {
      infrun_debug_printf
        ("stepping inside range [%s-%s]",
         paddress (gdbarch, ecs->event_thread->control.step_range_start),
         paddress (gdbarch, ecs->event_thread->control.step_range_end));

      /* Tentatively re-enable range stepping; `resume' disables it if
         necessary (e.g., if we're stepping over a breakpoint or we
         have software watchpoints).  */
      ecs->event_thread->control.may_range_step = 1;

      /* When stepping backward, stop at beginning of line range
         (unless it's the function entry point, in which case
         keep going back to the call point).  */
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
      if (stop_pc == ecs->event_thread->control.step_range_start
          && stop_pc != ecs->stop_func_start
          && execution_direction == EXEC_REVERSE)
        end_stepping_range (ecs);
      else
        keep_going (ecs);

      return;
    }

  /* We stepped out of the stepping range.  */

  /* If we are stepping at the source level and entered the runtime
     loader dynamic symbol resolution code...

     EXEC_FORWARD: we keep on single stepping until we exit the run
     time loader code and reach the callee's address.

     EXEC_REVERSE: we've already executed the callee (backward), and
     the runtime loader code is handled just like any other
     undebuggable function call.  Now we need only keep stepping
     backward through the trampoline code, and that's handled further
     down, so there is nothing for us to do here.  */

  if (execution_direction != EXEC_REVERSE
      && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
      && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ()))
    {
      CORE_ADDR pc_after_resolver =
        gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ());

      infrun_debug_printf ("stepped into dynsym resolve code");

      if (pc_after_resolver)
        {
          /* Set up a step-resume breakpoint at the address
             indicated by SKIP_SOLIB_RESOLVER.  */
          symtab_and_line sr_sal;
          sr_sal.pc = pc_after_resolver;
          sr_sal.pspace = get_frame_program_space (frame);

          insert_step_resume_breakpoint_at_sal (gdbarch,
                                                sr_sal, null_frame_id);
        }

      keep_going (ecs);
      return;
    }

  /* Step through an indirect branch thunk.  */
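  /* (Such thunks are small assembly stubs with no line info -- for
     example the retpoline sequences compilers emit for Spectre
     mitigations -- so they are treated like trampolines and stepped
     through.)  */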
  if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
      && gdbarch_in_indirect_branch_thunk (gdbarch,
                                           ecs->event_thread->stop_pc ()))
    {
      infrun_debug_printf ("stepped into indirect branch thunk");
      keep_going (ecs);
      return;
    }

  if (ecs->event_thread->control.step_range_end != 1
      && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
          || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
      && get_frame_type (frame) == SIGTRAMP_FRAME)
    {
      infrun_debug_printf ("stepped into signal trampoline");
      /* The inferior, while doing a "step" or "next", has ended up in
         a signal trampoline (either by a signal being delivered or by
         the signal handler returning).  Just single-step until the
         inferior leaves the trampoline (either by calling the handler
         or returning).  */
      keep_going (ecs);
      return;
    }

  /* If we're in the return path from a shared library trampoline,
     we want to proceed through the trampoline when stepping.  */

  /* macro/2012-04-25: This needs to come before the subroutine
     call check below as on some targets return trampolines look
     like subroutine calls (MIPS16 return thunks).  */
  if (gdbarch_in_solib_return_trampoline (gdbarch,
                                          ecs->event_thread->stop_pc (),
                                          ecs->stop_func_name)
      && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
    {
      /* Determine where this trampoline returns.  */
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
      CORE_ADDR real_stop_pc
        = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);

      infrun_debug_printf ("stepped into solib return tramp");

      /* Only proceed through if we know where it's going.  */
      if (real_stop_pc)
        {
          /* And put the step-breakpoint there and go until there.  */
          symtab_and_line sr_sal;

          sr_sal.pc = real_stop_pc;
          sr_sal.section = find_pc_overlay (sr_sal.pc);
          sr_sal.pspace = get_frame_program_space (frame);

          /* Do not specify what the fp should be when we stop since
             on some machines the prologue is where the new fp value
             is established.  */
          insert_step_resume_breakpoint_at_sal (gdbarch,
                                                sr_sal, null_frame_id);

          /* Restart without fiddling with the step ranges or
             other state.  */
          keep_going (ecs);
          return;
        }
    }

  /* Check for subroutine calls.  The check for the current frame
     equalling the step ID is not necessary - the check of the
     previous frame's ID is sufficient - but it is a common case and
     cheaper than checking the previous frame's ID.

     NOTE: frame_id_eq will never report two invalid frame IDs as
     being equal, so to get into this block, both the current and
     previous frame must have valid frame IDs.  */
  /* The outer_frame_id check is a heuristic to detect stepping
     through startup code.  If we step over an instruction which
     sets the stack pointer from an invalid value to a valid value,
     we may detect that as a subroutine call from the mythical
     "outermost" function.  This could be fixed by marking
     outermost frames as !stack_p,code_p,special_p.  Then the
     initial outermost frame, before sp was valid, would
     have code_addr == &_start.  See the comment in frame_id_eq
     for more.  */
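  /* In other words: the stack frame is no longer the one we were
     stepping in, its caller is the frame we were stepping in, and
     either the stepped frame was not the mythical outermost frame or
     the function has changed.  */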
  if (!frame_id_eq (get_stack_frame_id (frame),
                    ecs->event_thread->control.step_stack_frame_id)
      && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
                       ecs->event_thread->control.step_stack_frame_id)
          && (!frame_id_eq (ecs->event_thread->control.step_stack_frame_id,
                            outer_frame_id)
              || (ecs->event_thread->control.step_start_function
                  != find_pc_function (ecs->event_thread->stop_pc ())))))
    {
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
      CORE_ADDR real_stop_pc;

      infrun_debug_printf ("stepped into subroutine");

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
        {
          /* I presume that step_over_calls is only 0 when we're
             supposed to be stepping at the assembly language level
             ("stepi").  Just stop.  */
          /* And this works the same backward as frontward.  MVS  */
          end_stepping_range (ecs);
          return;
        }

      /* Reverse stepping through solib trampolines.  */

      if (execution_direction == EXEC_REVERSE
          && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
          && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
              || (ecs->stop_func_start == 0
                  && in_solib_dynsym_resolve_code (stop_pc))))
        {
          /* Any solib trampoline code can be handled in reverse
             by simply continuing to single-step.  We have already
             executed the solib function (backwards), and a few
             steps will take us back through the trampoline to the
             caller.  */
          keep_going (ecs);
          return;
        }

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
        {
          /* We're doing a "next".

             Normal (forward) execution: set a breakpoint at the
             callee's return address (the address at which the caller
             will resume).

             Reverse (backward) execution: set the step-resume
             breakpoint at the start of the function that we just
             stepped into (backwards), and continue to there.  When we
             get there, we'll need to single-step back to the caller.  */

          if (execution_direction == EXEC_REVERSE)
            {
              /* If we're already at the start of the function, we've either
                 just stepped backward into a single instruction function,
                 or stepped back out of a signal handler to the first
                 instruction of the function.  Just keep going, which will
                 single-step back to the caller.  */
              if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
                {
                  /* Normal function call return (static or dynamic).  */
                  symtab_and_line sr_sal;
                  sr_sal.pc = ecs->stop_func_start;
                  sr_sal.pspace = get_frame_program_space (frame);
                  insert_step_resume_breakpoint_at_sal (gdbarch,
                                                        sr_sal, null_frame_id);
                }
            }
          else
            insert_step_resume_breakpoint_at_caller (frame);

          keep_going (ecs);
          return;
        }

      /* If we are in a function call trampoline (a stub between the
         calling routine and the real function), locate the real
         function.  That's what tells us (a) whether we want to step
         into it at all, and (b) what prologue we want to run to the
         end of, if we do step into it.  */
      real_stop_pc = skip_language_trampoline (frame, stop_pc);
      if (real_stop_pc == 0)
        real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
      if (real_stop_pc != 0)
        ecs->stop_func_start = real_stop_pc;

      if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
        {
          symtab_and_line sr_sal;
          sr_sal.pc = ecs->stop_func_start;
          sr_sal.pspace = get_frame_program_space (frame);

          insert_step_resume_breakpoint_at_sal (gdbarch,
                                                sr_sal, null_frame_id);
          keep_going (ecs);
          return;
        }

      /* If we have line number information for the function we are
         thinking of stepping into and the function isn't on the skip
         list, step into it.

         If there are several symtabs at that PC (e.g. with include
         files), we just want to know whether *any* of them have line
         numbers.  find_pc_line handles this.  */
      {
        struct symtab_and_line tmp_sal;

        tmp_sal = find_pc_line (ecs->stop_func_start, 0);
        if (tmp_sal.line != 0
            && !function_name_is_marked_for_skip (ecs->stop_func_name,
                                                  tmp_sal)
            && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
          {
            if (execution_direction == EXEC_REVERSE)
              handle_step_into_function_backward (gdbarch, ecs);
            else
              handle_step_into_function (gdbarch, ecs);
            return;
          }
      }

      /* If we have no line number and the step-stop-if-no-debug is
         set, we stop the step so that the user has a chance to switch
         in assembly mode.  */
      if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
          && step_stop_if_no_debug)
        {
          end_stepping_range (ecs);
          return;
        }

      if (execution_direction == EXEC_REVERSE)
        {
          /* If we're already at the start of the function, we've either just
             stepped backward into a single instruction function without line
             number info, or stepped back out of a signal handler to the first
             instruction of the function without line number info.  Just keep
             going, which will single-step back to the caller.  */
          if (ecs->stop_func_start != stop_pc)
            {
              /* Set a breakpoint at callee's start address.
                 From there we can step once and be back in the caller.  */
              symtab_and_line sr_sal;
              sr_sal.pc = ecs->stop_func_start;
              sr_sal.pspace = get_frame_program_space (frame);
              insert_step_resume_breakpoint_at_sal (gdbarch,
                                                    sr_sal, null_frame_id);
            }
        }
      else
        /* Set a breakpoint at callee's return address (the address
           at which the caller will resume).  */
        insert_step_resume_breakpoint_at_caller (frame);

      keep_going (ecs);
      return;
    }

  /* Reverse stepping through solib trampolines.  */

  if (execution_direction == EXEC_REVERSE
      && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
    {
      CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();

      if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
          || (ecs->stop_func_start == 0
              && in_solib_dynsym_resolve_code (stop_pc)))
        {
          /* Any solib trampoline code can be handled in reverse
             by simply continuing to single-step.  We have already
             executed the solib function (backwards), and a few
             steps will take us back through the trampoline to the
             caller.  */
          keep_going (ecs);
          return;
        }
      else if (in_solib_dynsym_resolve_code (stop_pc))
        {
          /* Stepped backward into the solib dynsym resolver.
             Set a breakpoint at its start and continue, then
             one more step will take us out.  */
          symtab_and_line sr_sal;
          sr_sal.pc = ecs->stop_func_start;
          sr_sal.pspace = get_frame_program_space (frame);
          insert_step_resume_breakpoint_at_sal (gdbarch,
                                                sr_sal, null_frame_id);
          keep_going (ecs);
          return;
        }
    }

  /* This always returns the sal for the inner-most frame when we are in a
     stack of inlined frames, even if GDB actually believes that it is in a
     more outer frame.  This is checked for below by calls to
     inline_skipped_frames.  */
  stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);

  /* NOTE: tausq/2004-05-24: This if block used to be done before all
     the trampoline processing logic, however, there are some trampolines
     that have no names, so we should do trampoline handling first.  */
  if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
      && ecs->stop_func_name == NULL
      && stop_pc_sal.line == 0)
    {
      infrun_debug_printf ("stepped into undebuggable function");

      /* The inferior just stepped into, or returned to, an
         undebuggable function (where there is no debugging information
         and no line number corresponding to the address where the
         inferior stopped).  Since we want to skip this kind of code,
         we keep going until the inferior returns from this
         function - unless the user has asked us not to (via
         set step-mode) or we no longer know how to get back
         to the call site.  */
      if (step_stop_if_no_debug
          || !frame_id_p (frame_unwind_caller_id (frame)))
        {
          /* If we have no line number and the step-stop-if-no-debug
             is set, we stop the step so that the user has a chance to
             switch in assembly mode.  */
          end_stepping_range (ecs);
          return;
        }
      else
        {
          /* Set a breakpoint at callee's return address (the address
             at which the caller will resume).  */
          insert_step_resume_breakpoint_at_caller (frame);
          keep_going (ecs);
          return;
        }
    }

  if (ecs->event_thread->control.step_range_end == 1)
    {
      /* It is stepi or nexti.  We always want to stop stepping after
         one instruction.  */
      infrun_debug_printf ("stepi/nexti");
      end_stepping_range (ecs);
      return;
    }

  if (stop_pc_sal.line == 0)
    {
      /* We have no line number information.  That means to stop
         stepping (does this always happen right after one instruction,
         when we do "s" in a function with no line numbers,
         or can this happen as a result of a return or longjmp?).  */
      infrun_debug_printf ("no line number info");
      end_stepping_range (ecs);
      return;
    }

  /* Look for "calls" to inlined functions, part one.  If the inline
     frame machinery detected some skipped call sites, we have entered
     a new inline function.  */
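
  /* Illustrative example (hypothetical source; assumes the compiler
     really did inline the call):

       static inline int sq (int x) { return x * x; }
       int main (void) { return sq (2); }

     Stepping from main into sq may involve no call instruction at
     all; the inline machinery instead records skipped call sites when
     the PC moves into sq's range while the frame ID stays that of the
     stepping frame.  */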
  if (frame_id_eq (get_frame_id (get_current_frame ()),
                   ecs->event_thread->control.step_frame_id)
      && inline_skipped_frames (ecs->event_thread))
    {
      infrun_debug_printf ("stepped into inlined function");

      symtab_and_line call_sal = find_frame_sal (get_current_frame ());

      if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
        {
          /* For "step", we're going to stop.  But if the call site
             for this inlined function is on the same source line as
             we were previously stepping, go down into the function
             first.  Otherwise stop at the call site.  */

          if (call_sal.line == ecs->event_thread->current_line
              && call_sal.symtab == ecs->event_thread->current_symtab)
            {
              step_into_inline_frame (ecs->event_thread);
              if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
                {
                  keep_going (ecs);
                  return;
                }
            }

          end_stepping_range (ecs);
          return;
        }
      else
        {
          /* For "next", we should stop at the call site if it is on a
             different source line.  Otherwise continue through the
             inlined function.  */
          if (call_sal.line == ecs->event_thread->current_line
              && call_sal.symtab == ecs->event_thread->current_symtab)
            keep_going (ecs);
          else
            end_stepping_range (ecs);
          return;
        }
    }

  /* Look for "calls" to inlined functions, part two.  If we are still
     in the same real function we were stepping through, but we have
     to go further up to find the exact frame ID, we are stepping
     through a more inlined call beyond its call site.  */

  if (get_frame_type (get_current_frame ()) == INLINE_FRAME
      && !frame_id_eq (get_frame_id (get_current_frame ()),
                       ecs->event_thread->control.step_frame_id)
      && stepped_in_from (get_current_frame (),
                          ecs->event_thread->control.step_frame_id))
    {
      infrun_debug_printf ("stepping through inlined function");

      if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
          || inline_frame_is_marked_for_skip (false, ecs->event_thread))
        keep_going (ecs);
      else
        end_stepping_range (ecs);

      return;
    }

  bool refresh_step_info = true;
  if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
      && (ecs->event_thread->current_line != stop_pc_sal.line
          || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
    {
      /* We are at a different line.  */

      if (stop_pc_sal.is_stmt)
        {
          /* We are at the start of a statement.

             So stop.  Note that we don't stop if we step into the middle of a
             statement.  That is said to make things like for (;;) statements
             work better.  */
          infrun_debug_printf ("stepped to a different line");
          end_stepping_range (ecs);
          return;
        }
      else if (frame_id_eq (get_frame_id (get_current_frame ()),
                            ecs->event_thread->control.step_frame_id))
        {
          /* We are not at the start of a statement, and we have not changed
             frame.

             We ignore this line table entry, and continue stepping forward,
             looking for a better place to stop.  */
          refresh_step_info = false;
          infrun_debug_printf ("stepped to a different line, but "
                               "it's not the start of a statement");
        }
      else
        {
          /* We are not at the start of a statement, and we have changed
             frame.

             We ignore this line table entry, and continue stepping forward,
             looking for a better place to stop.  Keep refresh_step_info at
             true to note that the frame has changed, but ignore the line
             number to make sure we don't ignore a subsequent entry with the
             same line number.  */
          stop_pc_sal.line = 0;
          infrun_debug_printf ("stepped to a different frame, but "
                               "it's not the start of a statement");
        }
    }

  /* We aren't done stepping.

     Optimize by setting the stepping range to the line.
     (We might not be in the original line, but if we entered a
     new line in mid-statement, we continue stepping.  This makes
     things like for(;;) statements work better.)

     If we entered a SAL that indicates a non-statement line table entry,
     then we update the stepping range, but we don't update the step info,
     which includes things like the line number we are stepping away from.
     This means we will stop when we find a line table entry that is marked
     as is-statement, even if it matches the non-statement one we just
     stepped into.  */
  ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
  ecs->event_thread->control.step_range_end = stop_pc_sal.end;
  ecs->event_thread->control.may_range_step = 1;
  infrun_debug_printf
    ("updated step range, start = %s, end = %s, may_range_step = %d",
     paddress (gdbarch, ecs->event_thread->control.step_range_start),
     paddress (gdbarch, ecs->event_thread->control.step_range_end),
     ecs->event_thread->control.may_range_step);

  if (refresh_step_info)
    set_step_info (ecs->event_thread, frame, stop_pc_sal);

  infrun_debug_printf ("keep going");
  keep_going (ecs);
}

static bool restart_stepped_thread (process_stratum_target *resume_target,
                                    ptid_t resume_ptid);

/* In all-stop mode, if we're currently stepping but have stopped in
   some other thread, we may need to switch back to the stepped
   thread.  Returns true if we set the inferior running, false if we
   left it stopped (and the event needs further processing).  */
static bool
switch_back_to_stepped_thread (struct execution_control_state *ecs)
{
  if (!target_is_non_stop_p ())
    {
      /* If any thread is blocked on some internal breakpoint, and we
         simply need to step over that breakpoint to get it going
         again, do that first.  */

      /* However, if we see an event for the stepping thread, then we
         know all other threads have been moved past their breakpoints
         already.  Let the caller check whether the step is finished,
         etc., before deciding to move it past a breakpoint.  */
      if (ecs->event_thread->control.step_range_end != 0)
        return false;

      /* Check if the current thread is blocked on an incomplete
         step-over, interrupted by a random signal.  */
      if (ecs->event_thread->control.trap_expected
          && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
        {
          infrun_debug_printf
            ("need to finish step-over of [%s]",
             ecs->event_thread->ptid.to_string ().c_str ());
          keep_going (ecs);
          return true;
        }

      /* Check if the current thread is blocked by a single-step
         breakpoint of another thread.  */
      if (ecs->hit_singlestep_breakpoint)
        {
          infrun_debug_printf ("need to step [%s] over single-step breakpoint",
                               ecs->ptid.to_string ().c_str ());
          keep_going (ecs);
          return true;
        }

      /* If this thread needs yet another step-over (e.g., stepping
         through a delay slot), do it first before moving on to
         another thread.  */
      if (thread_still_needs_step_over (ecs->event_thread))
        {
          infrun_debug_printf
            ("thread [%s] still needs step-over",
             ecs->event_thread->ptid.to_string ().c_str ());
          keep_going (ecs);
          return true;
        }

      /* If scheduler locking applies even if not stepping, there's no
         need to walk over threads.  Above we've checked whether the
         current thread is stepping.  If some other thread not the
         event thread is stepping, then it must be that scheduler
         locking is not in effect.  */
      if (schedlock_applies (ecs->event_thread))
        return false;

      /* Otherwise, we no longer expect a trap in the current thread.
         Clear the trap_expected flag before switching back -- this is
         what keep_going does as well, if we call it.  */
      ecs->event_thread->control.trap_expected = 0;

      /* Likewise, clear the signal if it should not be passed.  */
      if (!signal_program[ecs->event_thread->stop_signal ()])
        ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

      if (restart_stepped_thread (ecs->target, ecs->ptid))
        {
          prepare_to_wait (ecs);
          return true;
        }

      switch_to_thread (ecs->event_thread);
    }

  return false;
}

/* Look for the thread that was stepping, and resume it.
   RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
   is resuming.  Return true if a thread was started, false
   otherwise.  */

static bool
restart_stepped_thread (process_stratum_target *resume_target,
                        ptid_t resume_ptid)
{
  /* Do all pending step-overs before actually proceeding with
     step/next/etc.  */
  if (start_step_over ())
    return true;

  for (thread_info *tp : all_threads_safe ())
    {
      if (tp->state == THREAD_EXITED)
        continue;

      if (tp->has_pending_waitstatus ())
        continue;

      /* Ignore threads of processes the caller is not
         resuming.  */
      if (!sched_multi
          && (tp->inf->process_target () != resume_target
              || tp->inf->pid != resume_ptid.pid ()))
        continue;

      if (tp->control.trap_expected)
        {
          infrun_debug_printf ("switching back to stepped thread (step-over)");

          if (keep_going_stepped_thread (tp))
            return true;
        }
    }
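
  /* No thread was in the middle of a step-over; now look for the
     thread that was stepping a source range, and restart that one.  */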
  for (thread_info *tp : all_threads_safe ())
    {
      if (tp->state == THREAD_EXITED)
        continue;

      if (tp->has_pending_waitstatus ())
        continue;

      /* Ignore threads of processes the caller is not
         resuming.  */
      if (!sched_multi
          && (tp->inf->process_target () != resume_target
              || tp->inf->pid != resume_ptid.pid ()))
        continue;

      /* Did we find the stepping thread?  */
      if (tp->control.step_range_end)
        {
          infrun_debug_printf ("switching back to stepped thread (stepping)");

          if (keep_going_stepped_thread (tp))
            return true;
        }
    }

  return false;
}

/* See infrun.h.  */

void
restart_after_all_stop_detach (process_stratum_target *proc_target)
{
  /* Note we don't check target_is_non_stop_p() here, because the
     current inferior may no longer have a process_stratum target
     pushed, as we just detached.  */

  /* See if we have a THREAD_RUNNING thread that needs to be
     re-resumed.  If we have any thread that is already executing,
     then we don't need to resume the target -- it has already been
     resumed.  With the remote target (in all-stop), it's even
     impossible to issue another resumption if the target is already
     resumed, until the target reports a stop.  */
  for (thread_info *thr : all_threads (proc_target))
    {
      if (thr->state != THREAD_RUNNING)
        continue;

      /* If we have any thread that is already executing, then we
         don't need to resume the target -- it has already been
         resumed.  */
      if (thr->executing ())
        return;

      /* If we have a pending event to process, skip resuming the
         target and go straight to processing it.  */
      if (thr->resumed () && thr->has_pending_waitstatus ())
        return;
    }

  /* Alright, we need to re-resume the target.  If a thread was
     stepping, we need to restart it stepping.  */
  if (restart_stepped_thread (proc_target, minus_one_ptid))
    return;

  /* Otherwise, find the first THREAD_RUNNING thread and resume
     it.  */
  for (thread_info *thr : all_threads (proc_target))
    {
      if (thr->state != THREAD_RUNNING)
        continue;

      execution_control_state ecs;
      reset_ecs (&ecs, thr);
      switch_to_thread (thr);
      keep_going (&ecs);
      return;
    }
}

/* Set a previously stepped thread back to stepping.  Returns true on
   success, false if the resume is not possible (e.g., the thread
   vanished).  */

static bool
keep_going_stepped_thread (struct thread_info *tp)
{
  struct frame_info *frame;
  struct execution_control_state ecss;
  struct execution_control_state *ecs = &ecss;

  /* If the stepping thread exited, then don't try to switch back and
     resume it, which could fail in several different ways depending
     on the target.  Instead, just keep going.

     We can find a stepping dead thread in the thread list in two
     cases:

     - The target supports thread exit events, and when the target
       tries to delete the thread from the thread list, inferior_ptid
       pointed at the exiting thread.  In such a case, calling
       delete_thread does not really remove the thread from the list;
       instead, the thread is left listed, with 'exited' state.

     - The target's debug interface does not support thread exit
       events, and so we have no idea whatsoever if the previously
       stepping thread is still alive.  For that reason, we need to
       synchronously query the target now.  */

  if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
    {
      infrun_debug_printf ("not resuming previously stepped thread, it has "
                           "vanished");

      delete_thread (tp);
      return false;
    }

  infrun_debug_printf ("resuming previously stepped thread");

  reset_ecs (ecs, tp);
  switch_to_thread (tp);

  tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
  frame = get_current_frame ();

  /* If the PC of the thread we were trying to single-step has
     changed, then that thread has trapped or been signaled, but the
     event has not been reported to GDB yet.  Re-poll the target
     looking for this particular thread's event (i.e. temporarily
     enable schedlock) by:

     - setting a break at the current PC
     - resuming that particular thread, only (by setting trap
       expected)

     This prevents us continuously moving the single-step breakpoint
     forward, one instruction at a time, overstepping.  */

  if (tp->stop_pc () != tp->prev_pc)
    {
      ptid_t resume_ptid;

      infrun_debug_printf ("expected thread advanced also (%s -> %s)",
                           paddress (target_gdbarch (), tp->prev_pc),
                           paddress (target_gdbarch (), tp->stop_pc ()));

      /* Clear the info of the previous step-over, as it's no longer
         valid (if the thread was trying to step over a breakpoint, it
         has already succeeded).  It's what keep_going would do too,
         if we called it.  Do this before trying to insert the sss
         breakpoint, otherwise if we were previously trying to step
         over this exact address in another thread, the breakpoint is
         skipped.  */
      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_single_step_breakpoint (get_frame_arch (frame),
                                     get_frame_address_space (frame),
                                     tp->stop_pc ());

      tp->set_resumed (true);
      resume_ptid = internal_resume_ptid (tp->control.stepping_command);
      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
    }
  else
    {
      infrun_debug_printf ("expected thread still hasn't advanced");

      keep_going_pass_signal (ecs);
    }

  return true;
}

/* Is thread TP in the middle of (software or hardware)
   single-stepping?  (Note the result of this function must never be
   passed directly as target_resume's STEP parameter.)  */

static bool
currently_stepping (struct thread_info *tp)
{
  return ((tp->control.step_range_end
           && tp->control.step_resume_breakpoint == NULL)
          || tp->control.trap_expected
          || tp->stepped_breakpoint
          || bpstat_should_step ());
}
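
/* (On the parenthetical warning above, as I read the resume
   machinery: trap_expected can be set while a thread is being moved
   past a breakpoint, e.g. with displaced stepping, so "currently
   stepping" does not imply that the low-level resume should be a
   hardware single-step.)  */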

/* Inferior has stepped into a subroutine call with source code that
   we should not step over.  Step to the first line of code in it.  */

static void
handle_step_into_function (struct gdbarch *gdbarch,
                           struct execution_control_state *ecs)
{
  fill_in_stop_func (gdbarch, ecs);

  compunit_symtab *cust
    = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
  if (cust != NULL && compunit_language (cust) != language_asm)
    ecs->stop_func_start
      = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);

  symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
  /* Use the step_resume_break to step until the end of the prologue,
     even if that involves jumps (as it seems to on the vax under
     4.2).  */
  /* If the prologue ends in the middle of a source line, continue to
     the end of that source line (if it is still within the function).
     Otherwise, just go to end of prologue.  */
  if (stop_func_sal.end
      && stop_func_sal.pc != ecs->stop_func_start
      && stop_func_sal.end < ecs->stop_func_end)
    ecs->stop_func_start = stop_func_sal.end;

  /* Architectures which require breakpoint adjustment might not be able
     to place a breakpoint at the computed address.  If so, the test
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
     ecs->stop_func_start to an address at which a breakpoint may be
     legitimately placed.

     Note:  kevinb/2004-01-19:  On FR-V, if this adjustment is not
     made, GDB will enter an infinite loop when stepping through
     optimized code consisting of VLIW instructions which contain
     subinstructions corresponding to different source lines.  On
     FR-V, it's not permitted to place a breakpoint on any but the
     first subinstruction of a VLIW instruction.  When a breakpoint is
     set, GDB will adjust the breakpoint address to the beginning of
     the VLIW instruction.  Thus, we need to make the corresponding
     adjustment here when computing the stop address.  */

  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
    {
      ecs->stop_func_start
        = gdbarch_adjust_breakpoint_address (gdbarch,
                                             ecs->stop_func_start);
    }

  if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
    {
      /* We are already there: stop now.  */
      end_stepping_range (ecs);
      return;
    }
  else
    {
      /* Put the step-breakpoint there and go until there.  */
      symtab_and_line sr_sal;
      sr_sal.pc = ecs->stop_func_start;
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
      sr_sal.pspace = get_frame_program_space (get_current_frame ());

      /* Do not specify what the fp should be when we stop since on
         some machines the prologue is where the new fp value is
         established.  */
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);

      /* And make sure stepping stops right away then.  */
      ecs->event_thread->control.step_range_end
        = ecs->event_thread->control.step_range_start;
    }
  keep_going (ecs);
}

/* Inferior has stepped backward into a subroutine call with source
   code that we should not step over.  Step to the beginning of the
   last line of code in it.  */

static void
handle_step_into_function_backward (struct gdbarch *gdbarch,
                                    struct execution_control_state *ecs)
{
  struct compunit_symtab *cust;
  struct symtab_and_line stop_func_sal;

  fill_in_stop_func (gdbarch, ecs);

  cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
  if (cust != NULL && compunit_language (cust) != language_asm)
    ecs->stop_func_start
      = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);

  stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);

  /* OK, we're just going to keep stepping here.  */
  if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
    {
      /* We're there already.  Just stop stepping now.  */
      end_stepping_range (ecs);
    }
  else
    {
      /* Else just reset the step range and keep going.
         No step-resume breakpoint, they don't work for
         epilogues, which can have multiple entry paths.  */
      ecs->event_thread->control.step_range_start = stop_func_sal.pc;
      ecs->event_thread->control.step_range_end = stop_func_sal.end;
      keep_going (ecs);
    }

  return;
}

/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
   This is used both to skip functions (e.g., for "next") and to skip
   over code (e.g., trampolines).  */

static void
insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
                                        struct symtab_and_line sr_sal,
                                        struct frame_id sr_id,
                                        enum bptype sr_type)
{
  /* There should never be more than one step-resume or longjmp-resume
     breakpoint per thread, so we should never be setting a new
     step_resume_breakpoint when one is already active.  */
  gdb_assert (inferior_thread ()->control.step_resume_breakpoint == NULL);
  gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);

  infrun_debug_printf ("inserting step-resume breakpoint at %s",
                       paddress (gdbarch, sr_sal.pc));

  inferior_thread ()->control.step_resume_breakpoint
    = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
}
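
/* Wrapper around insert_step_resume_breakpoint_at_sal_1 that inserts
   a normal-priority (bp_step_resume) breakpoint; the high-priority
   variant is inserted via insert_hp_step_resume_breakpoint_at_frame,
   below.  */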
void
insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
                                      struct symtab_and_line sr_sal,
                                      struct frame_id sr_id)
{
  insert_step_resume_breakpoint_at_sal_1 (gdbarch,
                                          sr_sal, sr_id,
                                          bp_step_resume);
}

/* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
   This is used to skip a potential signal handler.

   This is called with the interrupted function's frame.  The signal
   handler, when it returns, will resume the interrupted function at
   RETURN_FRAME.pc.  */

static void
insert_hp_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
{
  gdb_assert (return_frame != NULL);

  struct gdbarch *gdbarch = get_frame_arch (return_frame);

  symtab_and_line sr_sal;
  sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
  sr_sal.section = find_pc_overlay (sr_sal.pc);
  sr_sal.pspace = get_frame_program_space (return_frame);

  insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
                                          get_stack_frame_id (return_frame),
                                          bp_hp_step_resume);
}

/* Insert a "step-resume breakpoint" at the previous frame's PC.  This
   is used to skip a function after stepping into it (for "next" or if
   the called function has no debugging information).

   The current function has almost always been reached by single
   stepping a call or return instruction.  NEXT_FRAME belongs to the
   current function, and the breakpoint will be set at the caller's
   resume address.

   This is a separate function rather than reusing
   insert_hp_step_resume_breakpoint_at_frame in order to avoid
   get_prev_frame, which may stop prematurely (see the implementation
   of frame_unwind_caller_id for an example).  */

static void
insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
{
  /* We shouldn't have gotten here if we don't know where the call site
     is.  */
  gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));

  struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);

  symtab_and_line sr_sal;
  sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
                                        frame_unwind_caller_pc (next_frame));
  sr_sal.section = find_pc_overlay (sr_sal.pc);
  sr_sal.pspace = frame_unwind_program_space (next_frame);

  insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
                                        frame_unwind_caller_id (next_frame));
}

/* Insert a "longjmp-resume" breakpoint at PC.  This is used to set a
   new breakpoint at the target of a jmp_buf.  The handling of
   longjmp-resume uses the same mechanisms used for handling
   "step-resume" breakpoints.  */

static void
insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  /* There should never be more than one longjmp-resume breakpoint per
     thread, so we should never be setting a new
     longjmp_resume_breakpoint when one is already active.  */
  gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == NULL);

  infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
                       paddress (gdbarch, pc));

  inferior_thread ()->control.exception_resume_breakpoint =
    set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
}

/* Insert an exception resume breakpoint.  TP is the thread throwing
   the exception.  The block B is the block of the unwinder debug hook
   function.  FRAME is the frame corresponding to the call to this
   function.  SYM is the symbol of the function argument holding the
   target PC of the exception.  */

static void
insert_exception_resume_breakpoint (struct thread_info *tp,
                                    const struct block *b,
                                    struct frame_info *frame,
                                    struct symbol *sym)
{
  try
    {
      struct block_symbol vsym;
      struct value *value;
      CORE_ADDR handler;
      struct breakpoint *bp;

      vsym = lookup_symbol_search_name (sym->search_name (),
                                        b, VAR_DOMAIN);
      value = read_var_value (vsym.symbol, vsym.block, frame);
      /* If the value was optimized out, revert to the old behavior.  */
      if (! value_optimized_out (value))
        {
          handler = value_as_address (value);
          infrun_debug_printf ("exception resume at %lx",
                               (unsigned long) handler);

          bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
                                               handler,
                                               bp_exception_resume).release ();

          /* set_momentary_breakpoint_at_pc invalidates FRAME.  */
          frame = NULL;

          bp->thread = tp->global_num;
          inferior_thread ()->control.exception_resume_breakpoint = bp;
        }
    }
  catch (const gdb_exception_error &e)
    {
      /* We want to ignore errors here.  */
    }
}

/* A helper for check_exception_resume that sets an
   exception-breakpoint based on a SystemTap probe.  */

static void
insert_exception_resume_from_probe (struct thread_info *tp,
                                    const struct bound_probe *probe,
                                    struct frame_info *frame)
{
  struct value *arg_value;
  CORE_ADDR handler;
  struct breakpoint *bp;

  arg_value = probe_safe_evaluate_at_pc (frame, 1);
  if (!arg_value)
    return;

  handler = value_as_address (arg_value);

  infrun_debug_printf ("exception resume at %s",
                       paddress (probe->objfile->arch (), handler));

  bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
                                       handler, bp_exception_resume).release ();
  bp->thread = tp->global_num;
  inferior_thread ()->control.exception_resume_breakpoint = bp;
}

/* This is called when an exception has been intercepted.  Check to
   see whether the exception's destination is of interest, and if so,
   set an exception resume breakpoint there.  */

static void
check_exception_resume (struct execution_control_state *ecs,
                        struct frame_info *frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.prob)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  func = get_frame_function (frame);
  if (!func)
    return;

  try
    {
      const struct block *b;
      struct block_iterator iter;
      struct symbol *sym;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
         the unwinder's debug hook, declared as:

         void _Unwind_DebugHook (void *cfa, void *handler);

         The CFA argument indicates the frame to which control is
         about to be transferred.  HANDLER is the destination PC.

         We ignore the CFA and set a temporary breakpoint at HANDLER.
         This is not extremely efficient but it avoids issues in gdb
         with computing the DWARF CFA, and it also works even in weird
         cases such as throwing an exception from inside a signal
         handler.  */

      b = SYMBOL_BLOCK_VALUE (func);
      ALL_BLOCK_SYMBOLS (b, iter, sym)
        {
          if (!sym->is_argument ())
            continue;

          if (argno == 0)
            ++argno;
          else
            {
              insert_exception_resume_breakpoint (ecs->event_thread,
                                                  b, frame, sym);
              break;
            }
        }
    }
  catch (const gdb_exception_error &e)
    {
    }
}
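
/* Stop the wait-for-event loop: the current event will be presented
   to the user as a stop.  */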
static void
stop_waiting (struct execution_control_state *ecs)
{
  infrun_debug_printf ("stop_waiting");

  /* Let callers know we don't want to wait for the inferior anymore.  */
  ecs->wait_some_more = 0;

  /* If all-stop, but there exists a non-stop target, stop all
     threads now that we're presenting the stop to the user.  */
  if (!non_stop && exists_non_stop_target ())
    stop_all_threads ("presenting stop to user in all-stop");
}

/* Like keep_going, but passes the signal to the inferior, even if the
   signal is set to nopass.  */

static void
keep_going_pass_signal (struct execution_control_state *ecs)
{
  gdb_assert (ecs->event_thread->ptid == inferior_ptid);
  gdb_assert (!ecs->event_thread->resumed ());

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));

  if (ecs->event_thread->control.trap_expected)
    {
      struct thread_info *tp = ecs->event_thread;

      infrun_debug_printf ("%s has trap_expected set, "
                           "resuming to collect trap",
                           tp->ptid.to_string ().c_str ());

      /* We haven't yet gotten our trap, and either: intercepted a
         non-signal event (e.g., a fork); or took a signal which we
         are supposed to pass through to the inferior.  Simply
         continue.  */
      resume (ecs->event_thread->stop_signal ());
    }
  else if (step_over_info_valid_p ())
    {
      /* Another thread is stepping over a breakpoint in-line.  If
         this thread needs a step-over too, queue the request.  In
         either case, this resume must be deferred for later.  */
      struct thread_info *tp = ecs->event_thread;

      if (ecs->hit_singlestep_breakpoint
          || thread_still_needs_step_over (tp))
        {
          infrun_debug_printf ("step-over already in progress: "
                               "step-over for %s deferred",
                               tp->ptid.to_string ().c_str ());
          global_thread_step_over_chain_enqueue (tp);
        }
      else
        infrun_debug_printf ("step-over in progress: resume of %s deferred",
                             tp->ptid.to_string ().c_str ());
    }
  else
    {
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;
      step_over_what step_what;

      /* Either the trap was not expected, but we are continuing
         anyway (if we got a signal, the user asked it be passed to
         the child)
         -- or --
         We got our expected trap, but decided we should resume from
         it.

         We're going to run this baby now!

         Note that insert_breakpoints won't try to re-insert
         already inserted breakpoints.  Therefore, we don't
         care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
         displaced stepping to do so, insert all breakpoints
         (watchpoints, etc.) but the one we're stepping over, step one
         instruction, and then re-insert the breakpoint when that step
         is finished.  */

      step_what = thread_still_needs_step_over (ecs->event_thread);

      remove_bp = (ecs->hit_singlestep_breakpoint
                   || (step_what & STEP_OVER_BREAKPOINT));
      remove_wps = (step_what & STEP_OVER_WATCHPOINT);

      /* We can't use displaced stepping if we need to step past a
         watchpoint.  The instruction copied to the scratch pad would
         still trigger the watchpoint.  */
      if (remove_bp
          && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
        {
          set_step_over_info (regcache->aspace (),
                              regcache_read_pc (regcache), remove_wps,
                              ecs->event_thread->global_num);
        }
      else if (remove_wps)
        set_step_over_info (NULL, 0, remove_wps, -1);

      /* If we now need to do an in-line step-over, we need to stop
         all other threads.  Note this must be done before
         insert_breakpoints below, because that removes the breakpoint
         we're about to step over, otherwise other threads could miss
         it.  */
      if (step_over_info_valid_p () && target_is_non_stop_p ())
        stop_all_threads ("starting in-line step-over");

      /* Stop stepping if inserting breakpoints fails.  */
      try
        {
          insert_breakpoints ();
        }
      catch (const gdb_exception_error &e)
        {
          exception_print (gdb_stderr, e);
          stop_waiting (ecs);
          clear_step_over_info ();
          return;
        }

      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      resume (ecs->event_thread->stop_signal ());
    }

  prepare_to_wait (ecs);
}

/* Called when we should continue running the inferior, because the
   current event doesn't cause a user visible stop.  This does the
   resuming part; waiting for the next event is done elsewhere.  */

static void
keep_going (struct execution_control_state *ecs)
{
  if (ecs->event_thread->control.trap_expected
      && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
    ecs->event_thread->control.trap_expected = 0;

  if (!signal_program[ecs->event_thread->stop_signal ()])
    ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
  keep_going_pass_signal (ecs);
}

/* This function normally comes after a resume, before
   handle_inferior_event exits.  It takes care of any last bits of
   housekeeping, and sets the all-important wait_some_more flag.  */

static void
prepare_to_wait (struct execution_control_state *ecs)
{
  infrun_debug_printf ("prepare_to_wait");

  ecs->wait_some_more = 1;

  /* If the target can't async, emulate it by marking the infrun event
     handler such that as soon as we get back to the event-loop, we
     immediately end up in fetch_inferior_event again calling
     target_wait.  */
  if (!target_can_async_p ())
    mark_infrun_async_event_handler ();
}

/* We are done with the step range of a step/next/si/ni command.
   Called once for each n of a "step n" operation.  */

static void
end_stepping_range (struct execution_control_state *ecs)
{
  ecs->event_thread->control.stop_step = 1;
  stop_waiting (ecs);
}

/* Several print_*_reason functions to print why the inferior has stopped.

   We always print something when the inferior exits, or receives a signal.
   The rest of the cases are dealt with later on in normal_stop and
   print_it_typical.  Ideally there should be a call to one of these
   print_*_reason functions from handle_inferior_event each time
   stop_waiting is called.

   Note that we don't call these directly, instead we delegate that to
   the interpreters, through observers.  Interpreters then call these
   with whatever uiout is right.  */
void
print_end_stepping_range_reason (struct ui_out *uiout)
{
  /* For CLI-like interpreters, print nothing.  */

  if (uiout->is_mi_like_p ())
    {
      uiout->field_string ("reason",
                           async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
    }
}

void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  if (uiout->is_mi_like_p ())
    uiout->field_string
      ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  uiout->text ("\nProgram terminated with signal ");
  annotate_signal_name ();
  uiout->field_string ("signal-name",
                       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  uiout->text (", ");
  annotate_signal_string ();
  uiout->field_string ("signal-meaning",
                       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  uiout->text (".\n");
  uiout->text ("The program no longer exists.\n");
}

void
print_exited_reason (struct ui_out *uiout, int exitstatus)
{
  struct inferior *inf = current_inferior ();
  std::string pidstr = target_pid_to_str (ptid_t (inf->pid));

  annotate_exited (exitstatus);
  if (exitstatus)
    {
      if (uiout->is_mi_like_p ())
        uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
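      /* The exit code is printed in octal, with a leading zero.  */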
      std::string exit_code_str
        = string_printf ("0%o", (unsigned int) exitstatus);
      uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
                      plongest (inf->num), pidstr.c_str (),
                      string_field ("exit-code", exit_code_str.c_str ()));
    }
  else
    {
      if (uiout->is_mi_like_p ())
        uiout->field_string
          ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
      uiout->message ("[Inferior %s (%s) exited normally]\n",
                      plongest (inf->num), pidstr.c_str ());
    }
}

void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  struct thread_info *thr = inferior_thread ();

  annotate_signal ();
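
  /* MI frontends learn the stopping thread from the event record
     itself, so no thread prefix is printed in that case.  */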
  if (uiout->is_mi_like_p ())
    ;
  else if (show_thread_that_caused_stop ())
    {
      uiout->text ("\nThread ");
      uiout->field_string ("thread-id", print_thread_id (thr));

      const char *name = thread_name (thr);
      if (name != NULL)
        {
          uiout->text (" \"");
          uiout->field_string ("name", name);
          uiout->text ("\"");
        }
    }
  else
    uiout->text ("\nProgram");

  if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
    uiout->text (" stopped");
  else
    {
      uiout->text (" received signal ");
      annotate_signal_name ();
      if (uiout->is_mi_like_p ())
        uiout->field_string
          ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      uiout->text (", ");
      annotate_signal_string ();
      uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));

      struct regcache *regcache = get_current_regcache ();
      struct gdbarch *gdbarch = regcache->arch ();
      if (gdbarch_report_signal_info_p (gdbarch))
        gdbarch_report_signal_info (gdbarch, uiout, siggnal);

      annotate_signal_string_end ();
    }
  uiout->text (".\n");
}

void
print_no_history_reason (struct ui_out *uiout)
{
  uiout->text ("\nNo more reverse-execution history.\n");
}

/* Print current location without a level number, if we have changed
   functions or hit a breakpoint.  Print source line if we have one.
   bpstat_print contains the logic deciding in detail what to print,
   based on the event(s) that just occurred.  */

static void
print_stop_location (const target_waitstatus &ws)
{
  int bpstat_ret;
  enum print_what source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
         should) carry around the function and does (or should) use
         that when doing a frame comparison.  */
      if (tp->control.stop_step
          && frame_id_eq (tp->control.step_frame_id,
                          get_frame_id (get_current_frame ()))
          && (tp->control.step_start_function
              == find_pc_function (tp->stop_pc ())))
        {
          /* Finished step, just print source line.  */
          source_flag = SRC_LINE;
        }
      else
        {
          /* Print location and source line.  */
          source_flag = SRC_AND_LOC;
        }
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unknown value."));
    }

  /* The behavior of this routine with respect to the source flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (NULL), 0, source_flag, 1);
}

/* See infrun.h.  */

void
print_stop_event (struct ui_out *uiout, bool displays)
{
  struct target_waitstatus last;
  struct thread_info *tp;

  get_last_target_status (nullptr, nullptr, &last);

  {
    scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);

    print_stop_location (last);

    /* Display the auto-display expressions.  */
    if (displays)
      do_displays ();
  }

  tp = inferior_thread ();
  if (tp->thread_fsm () != nullptr
      && tp->thread_fsm ()->finished_p ())
    {
      struct return_value_info *rv;

      rv = tp->thread_fsm ()->return_value ();
      if (rv != nullptr)
        print_return_value (uiout, rv);
    }
}

/* See infrun.h.  */

void
maybe_remove_breakpoints (void)
{
  if (!breakpoints_should_be_inserted_now () && target_has_execution ())
    {
      if (remove_breakpoints ())
        {
          target_terminal::ours_for_output ();
          gdb_printf (_("Cannot remove breakpoints because "
                        "program is no longer writable.\nFurther "
                        "execution is probably impossible.\n"));
        }
    }
}

/* The execution context that just caused a normal stop.  */

struct stop_context
{
  stop_context ();

  DISABLE_COPY_AND_ASSIGN (stop_context);

  bool changed () const;

  /* The stop ID.  */
  ULONGEST stop_id;

  /* The event PTID.  */
  ptid_t ptid;

  /* If stopped for a thread event, this is the thread that caused the
     stop.  */
  thread_info_ref thread;

  /* The inferior that caused the stop.  */
  int inf_num;
};

/* Initializes a new stop context.  If stopped for a thread event, this
   takes a strong reference to the thread.  */

stop_context::stop_context ()
{
  stop_id = get_stop_id ();
  ptid = inferior_ptid;
  inf_num = current_inferior ()->num;

  if (inferior_ptid != null_ptid)
    {
      /* Take a strong reference so that the thread can't be deleted
         yet.  */
      thread = thread_info_ref::new_reference (inferior_thread ());
    }
}

/* Return true if the current context no longer matches the saved stop
   context.  */

bool
stop_context::changed () const
{
  if (ptid != inferior_ptid)
    return true;
  if (inf_num != current_inferior ()->num)
    return true;
  if (thread != NULL && thread->state != THREAD_STOPPED)
    return true;
  if (get_stop_id () != stop_id)
    return true;
  return false;
}

/* See infrun.h.  */

int
normal_stop (void)
{
  struct target_waitstatus last;

  get_last_target_status (nullptr, nullptr, &last);

  new_stop_id ();
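
  /* Each normal stop gets a fresh stop ID; stop_context compares it
     later to detect whether the target ran and stopped again in the
     meantime (e.g. because a stop hook resumed it).  */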

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */

  ptid_t finish_ptid = null_ptid;

  if (!non_stop)
    finish_ptid = minus_one_ptid;
  else if (last.kind () == TARGET_WAITKIND_SIGNALLED
           || last.kind () == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
         inferior when we get a process exit event.  E.g., for
         "checkpoint", when the current checkpoint/fork exits,
         linux-fork.c automatically switches to another fork from
         within target_mourn_inferior.  */
      if (inferior_ptid != null_ptid)
        finish_ptid = ptid_t (inferior_ptid.pid ());
    }
  else if (last.kind () != TARGET_WAITKIND_NO_RESUMED)
    finish_ptid = inferior_ptid;

  gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
  if (finish_ptid != null_ptid)
    {
      maybe_finish_thread_state.emplace
        (user_visible_resume_target (finish_ptid), finish_ptid);
    }

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    gdb::observers::signal_received.notify (inferior_thread ()->stop_signal ());

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop
      && previous_inferior_ptid != inferior_ptid
      && target_has_execution ()
      && last.kind () != TARGET_WAITKIND_SIGNALLED
      && last.kind () != TARGET_WAITKIND_EXITED
      && last.kind () != TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
        {
          target_terminal::ours_for_output ();
          gdb_printf (_("[Switching to %s]\n"),
                      target_pid_to_str (inferior_ptid).c_str ());
          annotate_thread_changed ();
        }
      previous_inferior_ptid = inferior_ptid;
    }

  if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
        if (current_ui->prompt_state == PROMPT_BLOCKED)
          {
            target_terminal::ours_for_output ();
            gdb_printf (_("No unwaited-for children left.\n"));
          }
    }

  /* Note: this depends on the update_thread_list call above.  */
  maybe_remove_breakpoints ();

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  SWITCH_THRU_ALL_UIS ()
    {
      async_enable_stdin ();
    }

  /* Let the user/frontend see the threads as stopped.  */
  maybe_finish_thread_state.reset ();

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.  Handle the case where the
     dummy call is returning after being stopped.  E.g. the dummy call
     previously hit a breakpoint.  (If the dummy call returns
     normally, we won't reach here.)  Do this before the stop hook is
     run, so that it doesn't get to see the temporary dummy frame,
     which is not where we'll present the stop.  */
  if (has_stack_frames ())
    {
      if (stop_stack_dummy == STOP_STACK_DUMMY)
        {
          /* Pop the empty frame that contains the stack dummy.  This
             also restores inferior state prior to the call (struct
             infcall_suspend_state).  */
          struct frame_info *frame = get_current_frame ();

          gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
          frame_pop (frame);
          /* frame_pop calls reinit_frame_cache as the last thing it
             does which means there's now no selected frame.  */
        }

      select_frame (get_current_frame ());

      /* Set the current source location.  */
      set_current_sal_from_frame (get_current_frame ());
    }

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  stop_context saved_context;

  try
    {
      execute_cmd_pre_hook (stop_command);
    }
  catch (const gdb_exception &ex)
    {
      exception_fprintf (gdb_stderr, ex,
                         "Error while running hook_stop:\n");
    }

  /* If the stop hook resumes the target, then there's no point in
     trying to notify about the previous stop; its context is
     gone.  Likewise if the command switches thread or inferior --
     the observers would print a stop for the wrong
     thread/inferior.  */
  if (saved_context.changed ())
    return 1;

  /* Notify observers about the stop.  This is where the interpreters
     print the stop event.  */
  if (inferior_ptid != null_ptid)
    gdb::observers::normal_stop.notify (inferior_thread ()->control.stop_bpstat,
                                        stop_print_frame);
  else
    gdb::observers::normal_stop.notify (NULL, stop_print_frame);

  annotate_stopped ();

  if (target_has_execution ())
    {
      if (last.kind () != TARGET_WAITKIND_SIGNALLED
          && last.kind () != TARGET_WAITKIND_EXITED
          && last.kind () != TARGET_WAITKIND_NO_RESUMED)
        /* Delete the breakpoint we stopped at, if it wants to be deleted.
           Delete any breakpoint that is to be deleted at the next stop.  */
        breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  /* Try to get rid of automatically added inferiors that are no
     longer needed.  Keeping those around slows down things linearly.
     Note that this never removes the current inferior.  */
  prune_inferiors ();

  return 0;
}

int
signal_stop_state (int signo)
{
  return signal_stop[signo];
}

int
signal_print_state (int signo)
{
  return signal_print[signo];
}

int
signal_pass_state (int signo)
{
  return signal_program[signo];
}

static void
signal_cache_update (int signo)
{
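  /* A signo of -1 means "recompute the cached state of every
     signal".  */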
  if (signo == -1)
    {
      for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
        signal_cache_update (signo);

      return;
    }
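
  /* A signal is passed straight through to the inferior only when GDB
     neither stops for it nor prints it, the user wants the program to
     see it, and no catchpoint intercepts it.  */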
  signal_pass[signo] = (signal_stop[signo] == 0
                        && signal_print[signo] == 0
                        && signal_program[signo] == 1
                        && signal_catch[signo] == 0);
}

int
signal_stop_update (int signo, int state)
{
  int ret = signal_stop[signo];

  signal_stop[signo] = state;
  signal_cache_update (signo);
  return ret;
}

int
signal_print_update (int signo, int state)
{
  int ret = signal_print[signo];

  signal_print[signo] = state;
  signal_cache_update (signo);
  return ret;
}

int
signal_pass_update (int signo, int state)
{
  int ret = signal_program[signo];

  signal_program[signo] = state;
  signal_cache_update (signo);
  return ret;
}

/* Update the global 'signal_catch' from INFO and notify the
   target.  */

void
signal_catch_update (const unsigned int *info)
{
  int i;

  for (i = 0; i < GDB_SIGNAL_LAST; ++i)
    signal_catch[i] = info[i] > 0;
  signal_cache_update (-1);
  target_pass_signals (signal_pass);
}

static void
sig_print_header (void)
{
  gdb_printf (_("Signal        Stop\tPrint\tPass "
                "to program\tDescription\n"));
}

static void
sig_print_info (enum gdb_signal oursig)
{
  const char *name = gdb_signal_to_name (oursig);
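  /* Pad the name out to 13 columns so that the columns printed by
     sig_print_header line up.  */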
  int name_padding = 13 - strlen (name);

  if (name_padding <= 0)
    name_padding = 0;

  gdb_printf ("%s", name);
  gdb_printf ("%*.*s ", name_padding, name_padding, " ");
  gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No");
  gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No");
  gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
  gdb_printf ("%s\n", gdb_signal_to_string (oursig));
}

/* Specify how various signals in the inferior should be handled.  */
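/* For example, "handle SIGUSR1 nostop noprint pass" lets the program
   receive SIGUSR1 silently, while "handle SIGSEGV stop print" makes
   GDB stop and announce it.  */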

static void
handle_command (const char *args, int from_tty)
{
  int digits, wordlen;
  int sigfirst, siglast;
  enum gdb_signal oursig;
  int allsigs;

  if (args == NULL)
    {
      error_no_arg (_("signal to handle"));
    }

  /* Allocate and zero an array of flags for which signals to handle.  */

  const size_t nsigs = GDB_SIGNAL_LAST;
  unsigned char sigs[nsigs] {};

  /* Break the command line up into args.  */

  gdb_argv built_argv (args);

  /* Walk through the args, looking for signal oursigs, signal names, and
     actions.  Signal numbers and signal names may be interspersed with
     actions, with the actions being performed for all signals cumulatively
     specified.  Signal ranges can be specified as <LOW>-<HIGH>.  */

  for (char *arg : built_argv)
    {
      wordlen = strlen (arg);
      for (digits = 0; isdigit (arg[digits]); digits++)
        ;

      allsigs = 0;
      sigfirst = siglast = -1;

      if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
        {
          /* Apply action to all signals except those used by the
             debugger.  Silently skip those.  */
          allsigs = 1;
          sigfirst = 0;
          siglast = nsigs - 1;
        }
      else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
        {
          SET_SIGS (nsigs, sigs, signal_stop);
          SET_SIGS (nsigs, sigs, signal_print);
        }
      else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
        {
          UNSET_SIGS (nsigs, sigs, signal_program);
        }
      else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
        {
          SET_SIGS (nsigs, sigs, signal_print);
        }
      else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
        {
          SET_SIGS (nsigs, sigs, signal_program);
        }
      else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
        {
          UNSET_SIGS (nsigs, sigs, signal_stop);
        }
      else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
        {
          SET_SIGS (nsigs, sigs, signal_program);
        }
      else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
        {
          UNSET_SIGS (nsigs, sigs, signal_print);
          UNSET_SIGS (nsigs, sigs, signal_stop);
        }
      else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
        {
          UNSET_SIGS (nsigs, sigs, signal_program);
        }
      else if (digits > 0)
        {
          /* It is numeric.  The numeric signal refers to our own
             internal signal numbering from target.h, not to host/target
             signal number.  This is a feature; users really should be
             using symbolic names anyway, and the common ones like
             SIGHUP, SIGINT, SIGALRM, etc. will work right anyway.  */

          sigfirst = siglast = (int)
            gdb_signal_from_command (atoi (arg));
          if (arg[digits] == '-')
            {
              siglast = (int)
                gdb_signal_from_command (atoi (arg + digits + 1));
            }
          if (sigfirst > siglast)
            {
              /* Bet he didn't figure we'd think of this case...  */
              std::swap (sigfirst, siglast);
            }
        }
      else
        {
          oursig = gdb_signal_from_name (arg);
          if (oursig != GDB_SIGNAL_UNKNOWN)
            {
              sigfirst = siglast = (int) oursig;
            }
          else
            {
              /* Not a number and not a recognized flag word => complain.  */
              error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
            }
        }

      /* If any signal numbers or symbol names were found, set flags for
         which signals to apply actions to.  */

      for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
        {
          switch ((enum gdb_signal) signum)
            {
            case GDB_SIGNAL_TRAP:
            case GDB_SIGNAL_INT:
              if (!allsigs && !sigs[signum])
                {
                  if (query (_("%s is used by the debugger.\n\
Are you sure you want to change it? "),
                             gdb_signal_to_name ((enum gdb_signal) signum)))
                    {
                      sigs[signum] = 1;
                    }
                  else
                    gdb_printf (_("Not confirmed, unchanged.\n"));
                }
              break;
            case GDB_SIGNAL_0:
            case GDB_SIGNAL_DEFAULT:
            case GDB_SIGNAL_UNKNOWN:
              /* Make sure that "all" doesn't print these.  */
              break;
            default:
              sigs[signum] = 1;
              break;
            }
        }
    }
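
  /* If any signal was changed above, recompute the cached pass state,
     push the new tables to the target, and (when interactive) show
     the result for the affected signals.  */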
  for (int signum = 0; signum < nsigs; signum++)
    if (sigs[signum])
      {
        signal_cache_update (-1);
        target_pass_signals (signal_pass);
        target_program_signals (signal_program);

        if (from_tty)
          {
            /* Show the results.  */
            sig_print_header ();
            for (; signum < nsigs; signum++)
              if (sigs[signum])
                sig_print_info ((enum gdb_signal) signum);
          }

        break;
      }
}

/* Complete the "handle" command.  */

static void
handle_completer (struct cmd_list_element *ignore,
                  completion_tracker &tracker,
                  const char *text, const char *word)
{
  static const char * const keywords[] =
    {
      "all",
      "stop",
      "ignore",
      "print",
      "pass",
      "nostop",
      "noignore",
      "noprint",
      "nopass",
      NULL,
    };

  signal_completer (ignore, tracker, text, word);
  complete_on_enum (tracker, keywords, word, word);
}

enum gdb_signal
gdb_signal_from_command (int num)
{
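  /* Signals 1 through 15 match the traditional Unix numbering, and
     GDB's internal gdb_signal enum matches them one-for-one; anything
     higher is host-dependent, so require a symbolic name instead.  */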
  if (num >= 1 && num <= 15)
    return (enum gdb_signal) num;
  error (_("Only signals 1-15 are valid as numeric signals.\n\
Use \"info signals\" for a list of symbolic signals."));
}

/* Print current contents of the tables set by the handle command.
   It is possible we should just be printing signals actually used
   by the current target (but for things to work right when switching
   targets, all signals should be in the signal tables).  */

static void
info_signals_command (const char *signum_exp, int from_tty)
{
  enum gdb_signal oursig;

  sig_print_header ();

  if (signum_exp)
    {
      /* First see if this is a symbol name.  */
      oursig = gdb_signal_from_name (signum_exp);
      if (oursig == GDB_SIGNAL_UNKNOWN)
        {
          /* No, try numeric.  */
          oursig =
            gdb_signal_from_command (parse_and_eval_long (signum_exp));
        }
      sig_print_info (oursig);
      return;
    }

  gdb_printf ("\n");
  /* These ugly casts brought to you by the native VAX compiler.  */
  for (oursig = GDB_SIGNAL_FIRST;
       (int) oursig < (int) GDB_SIGNAL_LAST;
       oursig = (enum gdb_signal) ((int) oursig + 1))
    {
      QUIT;

      if (oursig != GDB_SIGNAL_UNKNOWN
          && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
        sig_print_info (oursig);
    }

  gdb_printf (_("\nUse the \"handle\" command "
                "to change these tables.\n"));
}

/* The $_siginfo convenience variable is a bit special.  We don't know
   for sure the type of the value until we actually have a chance to
   fetch the data.  The type can change depending on gdbarch, so it is
   also dependent on which thread you have selected.  This is handled
   in two parts:

     1. making $_siginfo be an internalvar that creates a new value on
        access.

     2. making the value of $_siginfo be an lval_computed value.  */
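
/* For example, on GNU/Linux one can inspect the siginfo of the signal
   that stopped the inferior with:

     (gdb) print $_siginfo.si_signo
     (gdb) print $_siginfo._sifields._sigfault.si_addr  */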

/* This function implements the lval_computed support for reading a
   $_siginfo value.  */

static void
siginfo_value_read (struct value *v)
{
  LONGEST transferred;

  /* If we can access registers, we can also access $_siginfo, and
     vice versa.  */
  validate_registers_access ();

  transferred =
    target_read (current_inferior ()->top_target (),
                 TARGET_OBJECT_SIGNAL_INFO,
                 NULL,
                 value_contents_all_raw (v).data (),
                 value_offset (v),
                 TYPE_LENGTH (value_type (v)));

  if (transferred != TYPE_LENGTH (value_type (v)))
    error (_("Unable to read siginfo"));
}

/* This function implements the lval_computed support for writing a
   $_siginfo value.  */

static void
siginfo_value_write (struct value *v, struct value *fromval)
{
  LONGEST transferred;

  /* If we can access registers, we can also access $_siginfo, and
     vice versa.  */
  validate_registers_access ();

  transferred = target_write (current_inferior ()->top_target (),
                              TARGET_OBJECT_SIGNAL_INFO,
                              NULL,
                              value_contents_all_raw (fromval).data (),
                              value_offset (v),
                              TYPE_LENGTH (value_type (fromval)));

  if (transferred != TYPE_LENGTH (value_type (fromval)))
    error (_("Unable to write siginfo"));
}

static const struct lval_funcs siginfo_value_funcs =
  {
    siginfo_value_read,
    siginfo_value_write
  };

/* Return a new value with the correct type for the siginfo object of
   the current thread using architecture GDBARCH.  Return a void value
   if there's no object available.  */

static struct value *
siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
                    void *ignore)
{
  if (target_has_stack ()
      && inferior_ptid != null_ptid
      && gdbarch_get_siginfo_type_p (gdbarch))
    {
      struct type *type = gdbarch_get_siginfo_type (gdbarch);

      return allocate_computed_value (type, &siginfo_value_funcs, NULL);
    }

  return allocate_value (builtin_type (gdbarch)->builtin_void);
}

/* infcall_suspend_state contains state about the program itself like its
   registers and any signal it received when it last stopped.
   This state must be restored regardless of how the inferior function call
   ends (either successfully, or after it hits a breakpoint or signal)
   if the program is to properly continue where it left off.  */
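
/* A caller typically brackets an inferior call with a save/restore
   pair, e.g.:

     infcall_suspend_state_up saved = save_infcall_suspend_state ();
     ... set up and run the inferior function call ...
     restore_infcall_suspend_state (saved.release ());  */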

class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
                         const struct thread_info *tp,
                         struct regcache *regcache)
    : m_registers (new readonly_detached_regcache (*regcache))
  {
    tp->save_suspend_to (m_thread_suspend);

    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
        struct type *type = gdbarch_get_siginfo_type (gdbarch);
        size_t len = TYPE_LENGTH (type);

        siginfo_data.reset ((gdb_byte *) xmalloc (len));

        if (target_read (current_inferior ()->top_target (),
                         TARGET_OBJECT_SIGNAL_INFO, NULL,
                         siginfo_data.get (), 0, len) != len)
          {
            /* Errors ignored.  */
            siginfo_data.reset (nullptr);
          }
      }

    if (siginfo_data)
      {
        m_siginfo_gdbarch = gdbarch;
        m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
                struct thread_info *tp,
                struct regcache *regcache) const
  {
    tp->restore_suspend_from (m_thread_suspend);

    if (m_siginfo_gdbarch == gdbarch)
      {
        struct type *type = gdbarch_get_siginfo_type (gdbarch);

        /* Errors ignored.  */
        target_write (current_inferior ()->top_target (),
                      TARGET_OBJECT_SIGNAL_INFO, NULL,
                      m_siginfo_data.get (), 0, TYPE_LENGTH (type));
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution ())
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     TYPE_LENGTH (gdbarch_get_siginfo_type ()).  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};

infcall_suspend_state_up
save_infcall_suspend_state ()
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();

  infcall_suspend_state_up inf_state
    (new struct infcall_suspend_state (gdbarch, tp, regcache));

  /* Having saved the current state, adjust the thread state, discarding
     any stop signal information.  The stop signal is not useful when
     starting an inferior function call, and run_inferior_call will not use
     the signal due to its `proceed' call with GDB_SIGNAL_0.  */
  tp->set_stop_signal (GDB_SIGNAL_0);

  return inf_state;
}

/* Restore inferior session state to INF_STATE.  */

void
restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  struct thread_info *tp = inferior_thread ();
  struct regcache *regcache = get_current_regcache ();
  struct gdbarch *gdbarch = regcache->arch ();

  inf_state->restore (gdbarch, tp, regcache);
  discard_infcall_suspend_state (inf_state);
}

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}

/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  struct thread_control_state thread_control;
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID and level of the selected frame when the inferior function
     call was made.  */
  struct frame_id selected_frame_id {};
  int selected_frame_level = -1;
};

/* Save all of the information associated with the inferior<==>gdb
   connection.  */

infcall_control_state_up
save_infcall_control_state ()
{
  infcall_control_state_up inf_status (new struct infcall_control_state);
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  tp->control.step_resume_breakpoint = NULL;
  tp->control.exception_resume_breakpoint = NULL;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields:  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;

  save_selected_frame (&inf_status->selected_frame_id,
                       &inf_status->selected_frame_level);

  return inf_status;
}

/* Restore inferior session state to INF_STATUS.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack ())
    {
      restore_selected_frame (inf_status->selected_frame_id,
                              inf_status->selected_frame_level);
    }

  delete inf_status;
}

void
discard_infcall_control_state (struct infcall_control_state *inf_status)
{
  if (inf_status->thread_control.step_resume_breakpoint)
    inf_status->thread_control.step_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  if (inf_status->thread_control.exception_resume_breakpoint)
    inf_status->thread_control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* See save_infcall_control_state for info on stop_bpstat.  */
  bpstat_clear (&inf_status->thread_control.stop_bpstat);

  delete inf_status;
}

/* See infrun.h.  */

void
clear_exit_convenience_vars (void)
{
  clear_internalvar (lookup_internalvar ("_exitsignal"));
  clear_internalvar (lookup_internalvar ("_exitcode"));
}

/* User interface for reverse debugging:
   Set exec-direction / show exec-direction commands
   (returns error unless target implements to_set_exec_direction method).  */

enum exec_direction_kind execution_direction = EXEC_FORWARD;
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";
static const char *exec_direction = exec_forward;
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  NULL
};

static void
set_exec_direction_func (const char *args, int from_tty,
                         struct cmd_list_element *cmd)
{
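  /* Only commit the new direction when the target can actually run in
     reverse; otherwise restore the set/show string so that "show
     exec-direction" keeps reflecting reality.  */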
  if (target_can_execute_reverse ())
    {
      if (!strcmp (exec_direction, exec_forward))
        execution_direction = EXEC_FORWARD;
      else if (!strcmp (exec_direction, exec_reverse))
        execution_direction = EXEC_REVERSE;
    }
  else
    {
      exec_direction = exec_forward;
      error (_("Target does not support this operation."));
    }
}

static void
show_exec_direction_func (struct ui_file *out, int from_tty,
                          struct cmd_list_element *cmd, const char *value)
{
  switch (execution_direction)
    {
    case EXEC_FORWARD:
      gdb_printf (out, _("Forward.\n"));
      break;
    case EXEC_REVERSE:
      gdb_printf (out, _("Reverse.\n"));
      break;
    default:
      internal_error (__FILE__, __LINE__,
                      _("bogus execution_direction value: %d"),
                      (int) execution_direction);
    }
}

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
                        struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Resuming the execution of threads "
                      "of all processes is %s.\n"), value);
}

/* Implementation of `siginfo' variable.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  NULL,
};

/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  clear_async_event_handler (infrun_async_inferior_event_token);
  inferior_event_handler (INF_REG_EVENT);
}

#if GDB_SELF_TEST
namespace selftests
{

/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->gdbarch;

  /* The thread which inferior_ptid represents changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target1.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    gdb_assert (inferior_ptid == new_ptid);
  }

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target2.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    gdb_assert (inferior_ptid == old_ptid);
  }
}

} /* namespace selftests */
#endif /* GDB_SELF_TEST */

void _initialize_infrun ();
void
_initialize_infrun ()
{
  struct cmd_list_element *c;

  /* Register extra event sources in the event loop.  */
  infrun_async_inferior_event_token
    = create_async_event_handler (infrun_async_inferior_event_handler, NULL,
                                  "infrun");

  cmd_list_element *info_signals_cmd
    = add_info ("signals", info_signals_command, _("\
What debugger does when program gets various signals.\n\
Specify a signal as argument to print info on that signal only."));
  add_info_alias ("handle", info_signals_cmd, 0);

  c = add_com ("handle", class_run, handle_command, _("\
Specify how to handle signals.\n\
Usage: handle SIGNAL [ACTIONS]\n\
Args are signals and actions to apply to those signals.\n\
If no actions are specified, the current settings for the specified signals\n\
will be displayed instead.\n\
\n\
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
from 1-15 are allowed for compatibility with old versions of GDB.\n\
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
The special arg \"all\" is recognized to mean all signals except those\n\
used by the debugger, typically SIGTRAP and SIGINT.\n\
\n\
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
Stop means reenter debugger if this signal happens (implies print).\n\
Print means print a message if this signal happens.\n\
Pass means let program see this signal; otherwise program doesn't know.\n\
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
Pass and Stop may be combined.\n\
\n\
Multiple signals may be specified.  Signal numbers and signal names\n\
may be interspersed with actions, with the actions being performed for\n\
all signals cumulatively specified."));
  set_cmd_completer (c, handle_completer);

  stop_command = add_cmd ("stop", class_obscure,
                          not_just_help_class_command, _("\
There is no `stop' command, but you can set a hook on `stop'.\n\
This allows you to set a list of commands to be run each time execution\n\
of the program stops."), &cmdlist);

  add_setshow_boolean_cmd
    ("infrun", class_maintenance, &debug_infrun,
     _("Set inferior debugging."),
     _("Show inferior debugging."),
     _("When non-zero, inferior specific debugging is enabled."),
     NULL, show_debug_infrun, &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("non-stop", no_class,
                           &non_stop_1, _("\
Set whether gdb controls the inferior in non-stop mode."), _("\
Show whether gdb controls the inferior in non-stop mode."), _("\
When debugging a multi-threaded program and this setting is\n\
off (the default, also called all-stop mode), when one thread stops\n\
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
all other threads in the program while you interact with the thread of\n\
interest.  When you continue or step a thread, you can allow the other\n\
threads to run, or have them remain stopped, but while you inspect any\n\
thread's state, all threads stop.\n\
\n\
In non-stop mode, when one thread stops, other threads can continue\n\
to run freely.  You'll be able to step each thread independently,\n\
leave it stopped or free to run as needed."),
                           set_non_stop,
                           show_non_stop,
                           &setlist,
                           &showlist);

  for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
    {
      signal_stop[i] = 1;
      signal_print[i] = 1;
      signal_program[i] = 1;
      signal_catch[i] = 0;
    }

  /* Signals caused by debugger's own actions should not be given to
     the program afterwards.

     Do not deliver GDB_SIGNAL_TRAP by default, except when the user
     explicitly specifies that it should be delivered to the target
     program.  Typically, that would occur when a user is debugging a
     target monitor on a simulator: the target monitor sets a
     breakpoint; the simulator encounters this breakpoint and halts
     the simulation handing control to GDB; GDB, noting that the stop
     address doesn't map to any known breakpoint, returns control back
     to the simulator; the simulator then delivers the hardware
     equivalent of a GDB_SIGNAL_TRAP to the program being
     debugged.  */
  signal_program[GDB_SIGNAL_TRAP] = 0;
  signal_program[GDB_SIGNAL_INT] = 0;

  /* Signals that are not errors should not normally enter the debugger.  */
  signal_stop[GDB_SIGNAL_ALRM] = 0;
  signal_print[GDB_SIGNAL_ALRM] = 0;
  signal_stop[GDB_SIGNAL_VTALRM] = 0;
  signal_print[GDB_SIGNAL_VTALRM] = 0;
  signal_stop[GDB_SIGNAL_PROF] = 0;
  signal_print[GDB_SIGNAL_PROF] = 0;
  signal_stop[GDB_SIGNAL_CHLD] = 0;
  signal_print[GDB_SIGNAL_CHLD] = 0;
  signal_stop[GDB_SIGNAL_IO] = 0;
  signal_print[GDB_SIGNAL_IO] = 0;
  signal_stop[GDB_SIGNAL_POLL] = 0;
  signal_print[GDB_SIGNAL_POLL] = 0;
  signal_stop[GDB_SIGNAL_URG] = 0;
  signal_print[GDB_SIGNAL_URG] = 0;
  signal_stop[GDB_SIGNAL_WINCH] = 0;
  signal_print[GDB_SIGNAL_WINCH] = 0;
  signal_stop[GDB_SIGNAL_PRIO] = 0;
  signal_print[GDB_SIGNAL_PRIO] = 0;

  /* These signals are used internally by user-level thread
     implementations.  (See signal(5) on Solaris.)  Like the above
     signals, a healthy program receives and handles them as part of
     its normal operation.  */
  signal_stop[GDB_SIGNAL_LWP] = 0;
  signal_print[GDB_SIGNAL_LWP] = 0;
  signal_stop[GDB_SIGNAL_WAITING] = 0;
  signal_print[GDB_SIGNAL_WAITING] = 0;
  signal_stop[GDB_SIGNAL_CANCEL] = 0;
  signal_print[GDB_SIGNAL_CANCEL] = 0;
  signal_stop[GDB_SIGNAL_LIBRT] = 0;
  signal_print[GDB_SIGNAL_LIBRT] = 0;

  /* Update cached state.  */
  signal_cache_update (-1);

  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
                            &stop_on_solib_events, _("\
Set stopping for shared library events."), _("\
Show stopping for shared library events."), _("\
If nonzero, gdb will give control to the user when the dynamic linker\n\
notifies gdb of shared library events.  The most common event of interest\n\
to the user would be loading/unloading of a new library."),
                            set_stop_on_solib_events,
                            show_stop_on_solib_events,
                            &setlist, &showlist);

  add_setshow_enum_cmd ("follow-fork-mode", class_run,
                        follow_fork_mode_kind_names,
                        &follow_fork_mode_string, _("\
Set debugger response to a program call of fork or vfork."), _("\
Show debugger response to a program call of fork or vfork."), _("\
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
  parent  - the original process is debugged after a fork\n\
  child   - the new process is debugged after a fork\n\
The unfollowed process will continue to run.\n\
By default, the debugger will follow the parent process."),
                        NULL,
                        show_follow_fork_mode_string,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("follow-exec-mode", class_run,
                        follow_exec_mode_names,
                        &follow_exec_mode_string, _("\
Set debugger response to a program call of exec."), _("\
Show debugger response to a program call of exec."), _("\
An exec call replaces the program image of a process.\n\
\n\
follow-exec-mode can be:\n\
\n\
  new - the debugger creates a new inferior and rebinds the process\n\
to this new inferior.  The program the process was running before\n\
the exec call can be restarted afterwards by restarting the original\n\
inferior.\n\
\n\
  same - the debugger keeps the process bound to the same inferior.\n\
The new executable image replaces the previous executable loaded in\n\
the inferior.  Restarting the inferior after the exec call restarts\n\
the executable the process was running after the exec call.\n\
\n\
By default, the debugger will use the same inferior."),
                        NULL,
                        show_follow_exec_mode_string,
                        &setlist, &showlist);

  add_setshow_enum_cmd ("scheduler-locking", class_run,
                        scheduler_enums, &scheduler_mode, _("\
Set mode for locking scheduler during execution."), _("\
Show mode for locking scheduler during execution."), _("\
off    == no locking (threads may preempt at any time)\n\
on     == full locking (no thread except the current thread may run)\n\
          This applies to both normal execution and replay mode.\n\
step   == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
          In this mode, other threads may run during other commands.\n\
          This applies to both normal execution and replay mode.\n\
replay == scheduler locked in replay mode and unlocked during normal execution."),
                        set_schedlock_func,	/* traps on target vector */
                        show_scheduler_mode,
                        &setlist, &showlist);

  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
Set mode for resuming threads of all processes."), _("\
Show mode for resuming threads of all processes."), _("\
When on, execution commands (such as 'continue' or 'next') resume all\n\
threads of all processes.  When off (which is the default), execution\n\
commands only resume the threads of the current process.  The set of\n\
threads that are resumed is further refined by the scheduler-locking\n\
mode (see help set scheduler-locking)."),
                           NULL,
                           show_schedule_multiple,
                           &setlist, &showlist);

  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
Set mode of the step operation."), _("\
Show mode of the step operation."), _("\
When set, doing a step over a function without debug line information\n\
will stop at the first instruction of that function.  Otherwise, the\n\
function is skipped and the step command stops at a different source line."),
                           NULL,
                           show_step_stop_if_no_debug,
                           &setlist, &showlist);

  add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
                                &can_use_displaced_stepping, _("\
Set debugger's willingness to use displaced stepping."), _("\
Show debugger's willingness to use displaced stepping."), _("\
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
supported by the target architecture.  If off, gdb will not use displaced\n\
stepping to step over breakpoints, even if such is supported by the target\n\
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
if the target architecture supports it and non-stop mode is active, but will not\n\
use it in all-stop mode (see help set non-stop)."),
                                NULL,
                                show_can_use_displaced_stepping,
                                &setlist, &showlist);

  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
                        &exec_direction, _("Set direction of execution.\n\
Options are 'forward' or 'reverse'."),
                        _("Show direction of execution (forward/reverse)."),
                        _("Tells gdb whether to execute forward or backward."),
                        set_exec_direction_func, show_exec_direction_func,
                        &setlist, &showlist);

  /* Set/show detach-on-fork: user-settable mode.  */

  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
Set whether gdb will detach the child of a fork."), _("\
Show whether gdb will detach the child of a fork."), _("\
Tells gdb whether to detach the child of a fork."),
                           NULL, NULL, &setlist, &showlist);

  /* Set/show disable address space randomization mode.  */

  add_setshow_boolean_cmd ("disable-randomization", class_support,
                           &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
                           &set_disable_randomization,
                           &show_disable_randomization,
                           &setlist, &showlist);

  /* ptid initializations */
  inferior_ptid = null_ptid;
  target_last_wait_ptid = minus_one_ptid;

  gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
                                              "infrun");
  gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
                                                "infrun");
  gdb::observers::thread_exit.attach (infrun_thread_thread_exit, "infrun");
  gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
  gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");

  /* Explicitly create without lookup, since that tries to create a
     value with a void typed value, and when we get here, gdbarch
     isn't initialized yet.  At this point, we're quite sure there
     isn't another convenience variable of the same name.  */
  create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, NULL);

  add_setshow_boolean_cmd ("observer", no_class,
                           &observer_mode_1, _("\
Set whether gdb controls the inferior in observer mode."), _("\
Show whether gdb controls the inferior in observer mode."), _("\
In observer mode, GDB can get data from the inferior, but not\n\
affect its execution.  Registers and memory may not be changed,\n\
breakpoints may not be set, and the program cannot be interrupted\n\
or signalled."),
                           set_observer_mode,
                           show_observer_mode,
                           &setlist,
                           &showlist);

#if GDB_SELF_TEST
  selftests::register_test ("infrun_thread_ptid_changed",
                            selftests::infrun_thread_ptid_changed);
#endif
}