aarch64-tdep.c 138 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
7427842794280428142824283428442854286428742884289429042914292429342944295429642974298429943004301430243034304430543064307430843094310431143124313431443154316431743184319432043214322432343244325432643274328432943304331433243334334433543364337433843394340434143424343434443454346434743484349435043514352435343544355435643574358435943604361436243634364436543664367436843694370437143724373437443754376437743784379438043814382438343844385438643874388438943904391439243934394439543964397439843994400440144024403440444054406440744084409441044114412441344144415441644174418441944204421442244234424442544264427442844294430443144324433443444354436443744384439444044414442444344444445444644474448444944504451445244534454445544564457445844594460446144624463446444654466446744684469447044714472447344744475447644774478447944804481448244834484448544864487448844894490449144924493449444954496449744984499450045014502450345044505450645074508450945104511451245134514451545164517451845194520452145224523452445254526452745284529453045314532453345344535453645374538453945404541454245434544454545464547454845494550455145524553455445554556455745584559456045614562456345644565456645674568456945704571457245734574457545764577457845794580458145824583458445854586458745884589459045914592459345944595459645974598459946004601460246034604460546064607460846094610461146124613461446154616461746184619462046214622462346244625462646274628462946304631463246334634463546364637463846394640464146424643464446454646464746484649465046514652465346544655465646574658465946604661466246634664466546664667466846694670467146724673467446754676467746784679468046814682468346844685468646874688468946904691469246934694469546964697469846994700470147024703470447054706470747084709471047114712471347144715471647174718
  1. /* Common target dependent code for GDB on AArch64 systems.
  2. Copyright (C) 2009-2022 Free Software Foundation, Inc.
  3. Contributed by ARM Ltd.
  4. This file is part of GDB.
  5. This program is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 3 of the License, or
  8. (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  15. #include "defs.h"
  16. #include "frame.h"
  17. #include "gdbcmd.h"
  18. #include "gdbcore.h"
  19. #include "dis-asm.h"
  20. #include "regcache.h"
  21. #include "reggroups.h"
  22. #include "value.h"
  23. #include "arch-utils.h"
  24. #include "osabi.h"
  25. #include "frame-unwind.h"
  26. #include "frame-base.h"
  27. #include "trad-frame.h"
  28. #include "objfiles.h"
  29. #include "dwarf2.h"
  30. #include "dwarf2/frame.h"
  31. #include "gdbtypes.h"
  32. #include "prologue-value.h"
  33. #include "target-descriptions.h"
  34. #include "user-regs.h"
  35. #include "ax-gdb.h"
  36. #include "gdbsupport/selftest.h"
  37. #include "aarch64-tdep.h"
  38. #include "aarch64-ravenscar-thread.h"
  39. #include "record.h"
  40. #include "record-full.h"
  41. #include "arch/aarch64-insn.h"
  42. #include "gdbarch.h"
  43. #include "opcode/aarch64.h"
  44. #include <algorithm>
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS		4

/* All possible aarch64 target descriptors, indexed by SVE vector quotient
   (0 .. AARCH64_MAX_SVE_VQ) and by whether the pauth and mte features are
   present.  Entries are created lazily and cached here.  */
static target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/][2 /* mte */];
/* The standard register names, and all the valid aliases for them.
   Maps each alias to the raw GDB register number it refers to.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},
  /* 32-bit register names.  Each wN aliases the low half of xN, so they
     map to the same raw register number.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},
  /* specials: the intra-procedure-call scratch registers ip0/ip1 are
     aliases for x16/x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
  97. /* The required core 'R' registers. */
  98. static const char *const aarch64_r_register_names[] =
  99. {
  100. /* These registers must appear in consecutive RAW register number
  101. order and they must begin with AARCH64_X0_REGNUM! */
  102. "x0", "x1", "x2", "x3",
  103. "x4", "x5", "x6", "x7",
  104. "x8", "x9", "x10", "x11",
  105. "x12", "x13", "x14", "x15",
  106. "x16", "x17", "x18", "x19",
  107. "x20", "x21", "x22", "x23",
  108. "x24", "x25", "x26", "x27",
  109. "x28", "x29", "x30", "sp",
  110. "pc", "cpsr"
  111. };
  112. /* The FP/SIMD 'V' registers. */
  113. static const char *const aarch64_v_register_names[] =
  114. {
  115. /* These registers must appear in consecutive RAW register number
  116. order and they must begin with AARCH64_V0_REGNUM! */
  117. "v0", "v1", "v2", "v3",
  118. "v4", "v5", "v6", "v7",
  119. "v8", "v9", "v10", "v11",
  120. "v12", "v13", "v14", "v15",
  121. "v16", "v17", "v18", "v19",
  122. "v20", "v21", "v22", "v23",
  123. "v24", "v25", "v26", "v27",
  124. "v28", "v29", "v30", "v31",
  125. "fpsr",
  126. "fpcr"
  127. };
  128. /* The SVE 'Z' and 'P' registers. */
  129. static const char *const aarch64_sve_register_names[] =
  130. {
  131. /* These registers must appear in consecutive RAW register number
  132. order and they must begin with AARCH64_SVE_Z0_REGNUM! */
  133. "z0", "z1", "z2", "z3",
  134. "z4", "z5", "z6", "z7",
  135. "z8", "z9", "z10", "z11",
  136. "z12", "z13", "z14", "z15",
  137. "z16", "z17", "z18", "z19",
  138. "z20", "z21", "z22", "z23",
  139. "z24", "z25", "z26", "z27",
  140. "z28", "z29", "z30", "z31",
  141. "fpsr", "fpcr",
  142. "p0", "p1", "p2", "p3",
  143. "p4", "p5", "p6", "p7",
  144. "p8", "p9", "p10", "p11",
  145. "p12", "p13", "p14", "p15",
  146. "ffr", "vg"
  147. };
  148. static const char *const aarch64_pauth_register_names[] =
  149. {
  150. /* Authentication mask for data pointer. */
  151. "pauth_dmask",
  152. /* Authentication mask for code pointer. */
  153. "pauth_cmask"
  154. };
  155. static const char *const aarch64_mte_register_names[] =
  156. {
  157. /* Tag Control Register. */
  158. "tag_ctl"
  159. };
/* AArch64 prologue cache structure.  Filled in by the prologue analyzer
   and consumed by the frame unwinders.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  trad_frame_saved_reg *saved_regs;
};
/* Implement the "show debug aarch64" command: report whether AArch64
   architecture-specific debugging output is enabled.  VALUE is the
   textual form of the setting, supplied by the command machinery.  */
static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("AArch64 debugging is %s.\n"), value);
}
namespace {

/* Abstract instruction reader.  Allows the prologue analyzer to be fed
   instructions from a source other than target memory (e.g. for unit
   tests).  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction: LEN bytes at MEMADDR, interpreted with
     BYTE_ORDER.  Returns the instruction as an unsigned integer.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target: reads instruction words through
   GDB's code-memory access routine.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace
  211. /* If address signing is enabled, mask off the signature bits from the link
  212. register, which is passed by value in ADDR, using the register values in
  213. THIS_FRAME. */
  214. static CORE_ADDR
  215. aarch64_frame_unmask_lr (aarch64_gdbarch_tdep *tdep,
  216. struct frame_info *this_frame, CORE_ADDR addr)
  217. {
  218. if (tdep->has_pauth ()
  219. && frame_unwind_register_unsigned (this_frame,
  220. tdep->pauth_ra_state_regnum))
  221. {
  222. int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
  223. CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
  224. addr = addr & ~cmask;
  225. /* Record in the frame that the link register required unmasking. */
  226. set_frame_previous_pc_masked (this_frame);
  227. }
  228. return addr;
  229. }
  230. /* Implement the "get_pc_address_flags" gdbarch method. */
  231. static std::string
  232. aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
  233. {
  234. if (pc != 0 && get_frame_pc_masked (frame))
  235. return "PAC";
  236. return "";
  237. }
  238. /* Analyze a prologue, looking for a recognizable stack frame
  239. and frame pointer. Scan until we encounter a store that could
  240. clobber the stack frame unexpectedly, or an unknown instruction. */
  241. static CORE_ADDR
  242. aarch64_analyze_prologue (struct gdbarch *gdbarch,
  243. CORE_ADDR start, CORE_ADDR limit,
  244. struct aarch64_prologue_cache *cache,
  245. abstract_instruction_reader& reader)
  246. {
  247. enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  248. int i;
  249. /* Whether the stack has been set. This should be true when we notice a SP
  250. to FP move or if we are using the SP as the base register for storing
  251. data, in case the FP is ommitted. */
  252. bool seen_stack_set = false;
  253. /* Track X registers and D registers in prologue. */
  254. pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
  255. for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
  256. regs[i] = pv_register (i, 0);
  257. pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  258. for (; start < limit; start += 4)
  259. {
  260. uint32_t insn;
  261. aarch64_inst inst;
  262. insn = reader.read (start, 4, byte_order_for_code);
  263. if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
  264. break;
  265. if (inst.opcode->iclass == addsub_imm
  266. && (inst.opcode->op == OP_ADD
  267. || strcmp ("sub", inst.opcode->name) == 0))
  268. {
  269. unsigned rd = inst.operands[0].reg.regno;
  270. unsigned rn = inst.operands[1].reg.regno;
  271. gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
  272. gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
  273. gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
  274. gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
  275. if (inst.opcode->op == OP_ADD)
  276. {
  277. regs[rd] = pv_add_constant (regs[rn],
  278. inst.operands[2].imm.value);
  279. }
  280. else
  281. {
  282. regs[rd] = pv_add_constant (regs[rn],
  283. -inst.operands[2].imm.value);
  284. }
  285. /* Did we move SP to FP? */
  286. if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
  287. seen_stack_set = true;
  288. }
  289. else if (inst.opcode->iclass == pcreladdr
  290. && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
  291. {
  292. gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
  293. gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
  294. regs[inst.operands[0].reg.regno] = pv_unknown ();
  295. }
  296. else if (inst.opcode->iclass == branch_imm)
  297. {
  298. /* Stop analysis on branch. */
  299. break;
  300. }
  301. else if (inst.opcode->iclass == condbranch)
  302. {
  303. /* Stop analysis on branch. */
  304. break;
  305. }
  306. else if (inst.opcode->iclass == branch_reg)
  307. {
  308. /* Stop analysis on branch. */
  309. break;
  310. }
  311. else if (inst.opcode->iclass == compbranch)
  312. {
  313. /* Stop analysis on branch. */
  314. break;
  315. }
  316. else if (inst.opcode->op == OP_MOVZ)
  317. {
  318. gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
  319. /* If this shows up before we set the stack, keep going. Otherwise
  320. stop the analysis. */
  321. if (seen_stack_set)
  322. break;
  323. regs[inst.operands[0].reg.regno] = pv_unknown ();
  324. }
  325. else if (inst.opcode->iclass == log_shift
  326. && strcmp (inst.opcode->name, "orr") == 0)
  327. {
  328. unsigned rd = inst.operands[0].reg.regno;
  329. unsigned rn = inst.operands[1].reg.regno;
  330. unsigned rm = inst.operands[2].reg.regno;
  331. gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
  332. gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
  333. gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
  334. if (inst.operands[2].shifter.amount == 0
  335. && rn == AARCH64_SP_REGNUM)
  336. regs[rd] = regs[rm];
  337. else
  338. {
  339. aarch64_debug_printf ("prologue analysis gave up "
  340. "addr=%s opcode=0x%x (orr x register)",
  341. core_addr_to_string_nz (start), insn);
  342. break;
  343. }
  344. }
  345. else if (inst.opcode->op == OP_STUR)
  346. {
  347. unsigned rt = inst.operands[0].reg.regno;
  348. unsigned rn = inst.operands[1].addr.base_regno;
  349. int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
  350. gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
  351. gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
  352. gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
  353. gdb_assert (!inst.operands[1].addr.offset.is_reg);
  354. stack.store
  355. (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
  356. size, regs[rt]);
  357. /* Are we storing with SP as a base? */
  358. if (rn == AARCH64_SP_REGNUM)
  359. seen_stack_set = true;
  360. }
  361. else if ((inst.opcode->iclass == ldstpair_off
  362. || (inst.opcode->iclass == ldstpair_indexed
  363. && inst.operands[2].addr.preind))
  364. && strcmp ("stp", inst.opcode->name) == 0)
  365. {
  366. /* STP with addressing mode Pre-indexed and Base register. */
  367. unsigned rt1;
  368. unsigned rt2;
  369. unsigned rn = inst.operands[2].addr.base_regno;
  370. int32_t imm = inst.operands[2].addr.offset.imm;
  371. int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
  372. gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
  373. || inst.operands[0].type == AARCH64_OPND_Ft);
  374. gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
  375. || inst.operands[1].type == AARCH64_OPND_Ft2);
  376. gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
  377. gdb_assert (!inst.operands[2].addr.offset.is_reg);
  378. /* If recording this store would invalidate the store area
  379. (perhaps because rn is not known) then we should abandon
  380. further prologue analysis. */
  381. if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
  382. break;
  383. if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
  384. break;
  385. rt1 = inst.operands[0].reg.regno;
  386. rt2 = inst.operands[1].reg.regno;
  387. if (inst.operands[0].type == AARCH64_OPND_Ft)
  388. {
  389. rt1 += AARCH64_X_REGISTER_COUNT;
  390. rt2 += AARCH64_X_REGISTER_COUNT;
  391. }
  392. stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
  393. stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);
  394. if (inst.operands[2].addr.writeback)
  395. regs[rn] = pv_add_constant (regs[rn], imm);
  396. /* Ignore the instruction that allocates stack space and sets
  397. the SP. */
  398. if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
  399. seen_stack_set = true;
  400. }
  401. else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
  402. || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
  403. && (inst.opcode->op == OP_STR_POS
  404. || inst.opcode->op == OP_STRF_POS)))
  405. && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
  406. && strcmp ("str", inst.opcode->name) == 0)
  407. {
  408. /* STR (immediate) */
  409. unsigned int rt = inst.operands[0].reg.regno;
  410. int32_t imm = inst.operands[1].addr.offset.imm;
  411. unsigned int rn = inst.operands[1].addr.base_regno;
  412. int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
  413. gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
  414. || inst.operands[0].type == AARCH64_OPND_Ft);
  415. if (inst.operands[0].type == AARCH64_OPND_Ft)
  416. rt += AARCH64_X_REGISTER_COUNT;
  417. stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
  418. if (inst.operands[1].addr.writeback)
  419. regs[rn] = pv_add_constant (regs[rn], imm);
  420. /* Are we storing with SP as a base? */
  421. if (rn == AARCH64_SP_REGNUM)
  422. seen_stack_set = true;
  423. }
  424. else if (inst.opcode->iclass == testbranch)
  425. {
  426. /* Stop analysis on branch. */
  427. break;
  428. }
  429. else if (inst.opcode->iclass == ic_system)
  430. {
  431. aarch64_gdbarch_tdep *tdep
  432. = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  433. int ra_state_val = 0;
  434. if (insn == 0xd503233f /* paciasp. */
  435. || insn == 0xd503237f /* pacibsp. */)
  436. {
  437. /* Return addresses are mangled. */
  438. ra_state_val = 1;
  439. }
  440. else if (insn == 0xd50323bf /* autiasp. */
  441. || insn == 0xd50323ff /* autibsp. */)
  442. {
  443. /* Return addresses are not mangled. */
  444. ra_state_val = 0;
  445. }
  446. else if (IS_BTI (insn))
  447. /* We don't need to do anything special for a BTI instruction. */
  448. continue;
  449. else
  450. {
  451. aarch64_debug_printf ("prologue analysis gave up addr=%s"
  452. " opcode=0x%x (iclass)",
  453. core_addr_to_string_nz (start), insn);
  454. break;
  455. }
  456. if (tdep->has_pauth () && cache != nullptr)
  457. {
  458. int regnum = tdep->pauth_ra_state_regnum;
  459. cache->saved_regs[regnum].set_value (ra_state_val);
  460. }
  461. }
  462. else
  463. {
  464. aarch64_debug_printf ("prologue analysis gave up addr=%s"
  465. " opcode=0x%x",
  466. core_addr_to_string_nz (start), insn);
  467. break;
  468. }
  469. }
  470. if (cache == NULL)
  471. return start;
  472. if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
  473. {
  474. /* Frame pointer is fp. Frame size is constant. */
  475. cache->framereg = AARCH64_FP_REGNUM;
  476. cache->framesize = -regs[AARCH64_FP_REGNUM].k;
  477. }
  478. else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
  479. {
  480. /* Try the stack pointer. */
  481. cache->framesize = -regs[AARCH64_SP_REGNUM].k;
  482. cache->framereg = AARCH64_SP_REGNUM;
  483. }
  484. else
  485. {
  486. /* We're just out of luck. We don't know where the frame is. */
  487. cache->framereg = -1;
  488. cache->framesize = 0;
  489. }
  490. for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
  491. {
  492. CORE_ADDR offset;
  493. if (stack.find_reg (gdbarch, i, &offset))
  494. cache->saved_regs[i].set_addr (offset);
  495. }
  496. for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
  497. {
  498. int regnum = gdbarch_num_regs (gdbarch);
  499. CORE_ADDR offset;
  500. if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
  501. &offset))
  502. cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
  503. }
  504. return start;
  505. }
  506. static CORE_ADDR
  507. aarch64_analyze_prologue (struct gdbarch *gdbarch,
  508. CORE_ADDR start, CORE_ADDR limit,
  509. struct aarch64_prologue_cache *cache)
  510. {
  511. instruction_reader reader;
  512. return aarch64_analyze_prologue (gdbarch, start, limit, cache,
  513. reader);
  514. }
  515. #if GDB_SELF_TEST
  516. namespace selftests {
/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  /* INSNS is an array of SIZE 32-bit opcodes, treated as the contents of
     memory starting at address 0.  Only the pointer is stored, so the
     array must outlive this reader.  */
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  /* Return the opcode at MEMADDR.  LEN must be 4, MEMADDR must be
     4-byte aligned and inside the supplied array.  BYTE_ORDER is unused
     here because the opcodes are already host integers.  */
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  /* The cooked instruction stream and its length in instructions.  */
  const uint32_t *m_insns;
  size_t m_insns_size;
};
/* Self test: run aarch64_analyze_prologue over hand-written instruction
   sequences and verify the resulting frame description — the address
   where analysis stops, the chosen frame register, the frame size, and
   which stack slots each register was recorded as saved to.  */

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    /* Analysis should stop at the bl, i.e. after two instructions.  */
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
	else
	  /* Every other X register is still live in itself.  */
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		    && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* Analysis should stop at the ldr, i.e. after five instructions.  */
    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int num_regs = gdbarch_num_regs (gdbarch);
	int regnum = i + num_regs + AARCH64_D0_REGNUM;

	/* d0 was stored at sp+24, i.e. offset -24 from the frame.  */
	if (i == 0)
	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
	else
	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
		      && cache.saved_regs[regnum].realreg () == regnum);
      }
  }

  /* Test handling of movz before setting the frame pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x910003fd, /* mov     x29, sp */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test handling of movz/stp when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0x290207e0, /* stp     w0, w1, [sp, #16] */
      0xa9018fe2, /* stp     x2, x3, [sp, #24] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/str when using the stack pointer as frame
     pointer  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb9002be4, /* str     w4, [sp, #40] */
      0xf9001be5, /* str     x5, [sp, #48] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz/stur when using the stack pointer as frame
     pointer.  */
  {
    static const uint32_t insns[] = {
      0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
      0x52800020, /* mov     w0, #0x1 */
      0xb80343e6, /* stur    w6, [sp, #52] */
      0xf80383e7, /* stur    x7, [sp, #56] */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 5th instruction.  */
    SELF_CHECK (end == (5 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 64);
  }

  /* Test handling of movz when there is no frame pointer set or no stack
     pointer used.  */
  {
    static const uint32_t insns[] = {
      0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
      0x52800020, /* mov     w0, #0x1 */
      0x528000a2, /* mov     w2, #0x5 */
      0x97fffff8, /* bl      6e4 */
    };

    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    /* We should stop at the 4th instruction.  */
    SELF_CHECK (end == (4 - 1) * 4);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 16);
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
	0x910003fd, /* mov x29, sp */
	0xf801c3f3, /* str x19, [sp, #28] */
	0xb9401fa0, /* ldr x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
			&& cache.saved_regs[i].realreg () == i);
	}

      /* The paciasp must have left the RA_STATE pseudo register with a
	 recorded (mangled) value.  */
      if (tdep->has_pauth ())
	{
	  int regnum = tdep->pauth_ra_state_regnum;
	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
	}
    }

  /* Test a prologue with a BTI instruction.  */
  {
    static const uint32_t insns[] = {
      0xd503245f, /* bti */
      0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
      0x910003fd, /* mov x29, sp */
      0xf801c3f3, /* str x19, [sp, #28] */
      0xb9401fa0, /* ldr x19, [x29, #28] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
					      reader);

    /* The bti is skipped; analysis stops at the ldr.  */
    SELF_CHECK (end == 4 * 4);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr () == -20);
	else if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr () == -40);
	else
	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
		      && cache.saved_regs[i].realreg () == i);
      }
  }
}
  771. } // namespace selftests
  772. #endif /* GDB_SELF_TEST */
  773. /* Implement the "skip_prologue" gdbarch method. */
  774. static CORE_ADDR
  775. aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  776. {
  777. CORE_ADDR func_addr, limit_pc;
  778. /* See if we can determine the end of the prologue via the symbol
  779. table. If so, then return either PC, or the PC after the
  780. prologue, whichever is greater. */
  781. if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
  782. {
  783. CORE_ADDR post_prologue_pc
  784. = skip_prologue_using_sal (gdbarch, func_addr);
  785. if (post_prologue_pc != 0)
  786. return std::max (pc, post_prologue_pc);
  787. }
  788. /* Can't determine prologue from the symbol table, need to examine
  789. instructions. */
  790. /* Find an upper limit on the function prologue using the debug
  791. information. If the debug information could not be used to
  792. provide that bound, then use an arbitrary large number as the
  793. upper bound. */
  794. limit_pc = skip_prologue_using_sal (gdbarch, pc);
  795. if (limit_pc == 0)
  796. limit_pc = pc + 128; /* Magic. */
  797. /* Try disassembling prologue. */
  798. return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
  799. }
  800. /* Scan the function prologue for THIS_FRAME and populate the prologue
  801. cache CACHE. */
  802. static void
  803. aarch64_scan_prologue (struct frame_info *this_frame,
  804. struct aarch64_prologue_cache *cache)
  805. {
  806. CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  807. CORE_ADDR prologue_start;
  808. CORE_ADDR prologue_end;
  809. CORE_ADDR prev_pc = get_frame_pc (this_frame);
  810. struct gdbarch *gdbarch = get_frame_arch (this_frame);
  811. cache->prev_pc = prev_pc;
  812. /* Assume we do not find a frame. */
  813. cache->framereg = -1;
  814. cache->framesize = 0;
  815. if (find_pc_partial_function (block_addr, NULL, &prologue_start,
  816. &prologue_end))
  817. {
  818. struct symtab_and_line sal = find_pc_line (prologue_start, 0);
  819. if (sal.line == 0)
  820. {
  821. /* No line info so use the current PC. */
  822. prologue_end = prev_pc;
  823. }
  824. else if (sal.end < prologue_end)
  825. {
  826. /* The next line begins after the function end. */
  827. prologue_end = sal.end;
  828. }
  829. prologue_end = std::min (prologue_end, prev_pc);
  830. aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
  831. }
  832. else
  833. {
  834. CORE_ADDR frame_loc;
  835. frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
  836. if (frame_loc == 0)
  837. return;
  838. cache->framereg = AARCH64_FP_REGNUM;
  839. cache->framesize = 16;
  840. cache->saved_regs[29].set_addr (0);
  841. cache->saved_regs[30].set_addr (8);
  842. }
  843. }
  844. /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
  845. function may throw an exception if the inferior's registers or memory is
  846. not available. */
  847. static void
  848. aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
  849. struct aarch64_prologue_cache *cache)
  850. {
  851. CORE_ADDR unwound_fp;
  852. int reg;
  853. aarch64_scan_prologue (this_frame, cache);
  854. if (cache->framereg == -1)
  855. return;
  856. unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  857. if (unwound_fp == 0)
  858. return;
  859. cache->prev_sp = unwound_fp + cache->framesize;
  860. /* Calculate actual addresses of saved registers using offsets
  861. determined by aarch64_analyze_prologue. */
  862. for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
  863. if (cache->saved_regs[reg].is_addr ())
  864. cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
  865. + cache->prev_sp);
  866. cache->func = get_frame_func (this_frame);
  867. cache->available_p = 1;
  868. }
  869. /* Allocate and fill in *THIS_CACHE with information about the prologue of
  870. *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
  871. Return a pointer to the current aarch64_prologue_cache in
  872. *THIS_CACHE. */
  873. static struct aarch64_prologue_cache *
  874. aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
  875. {
  876. struct aarch64_prologue_cache *cache;
  877. if (*this_cache != NULL)
  878. return (struct aarch64_prologue_cache *) *this_cache;
  879. cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  880. cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  881. *this_cache = cache;
  882. try
  883. {
  884. aarch64_make_prologue_cache_1 (this_frame, cache);
  885. }
  886. catch (const gdb_exception_error &ex)
  887. {
  888. if (ex.error != NOT_AVAILABLE_ERROR)
  889. throw;
  890. }
  891. return cache;
  892. }
  893. /* Implement the "stop_reason" frame_unwind method. */
  894. static enum unwind_stop_reason
  895. aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
  896. void **this_cache)
  897. {
  898. struct aarch64_prologue_cache *cache
  899. = aarch64_make_prologue_cache (this_frame, this_cache);
  900. if (!cache->available_p)
  901. return UNWIND_UNAVAILABLE;
  902. /* Halt the backtrace at "_start". */
  903. gdbarch *arch = get_frame_arch (this_frame);
  904. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (arch);
  905. if (cache->prev_pc <= tdep->lowest_pc)
  906. return UNWIND_OUTERMOST;
  907. /* We've hit a wall, stop. */
  908. if (cache->prev_sp == 0)
  909. return UNWIND_OUTERMOST;
  910. return UNWIND_NO_REASON;
  911. }
  912. /* Our frame ID for a normal frame is the current function's starting
  913. PC and the caller's SP when we were called. */
  914. static void
  915. aarch64_prologue_this_id (struct frame_info *this_frame,
  916. void **this_cache, struct frame_id *this_id)
  917. {
  918. struct aarch64_prologue_cache *cache
  919. = aarch64_make_prologue_cache (this_frame, this_cache);
  920. if (!cache->available_p)
  921. *this_id = frame_id_build_unavailable_stack (cache->func);
  922. else
  923. *this_id = frame_id_build (cache->prev_sp, cache->func);
  924. }
  925. /* Implement the "prev_register" frame_unwind method. */
  926. static struct value *
  927. aarch64_prologue_prev_register (struct frame_info *this_frame,
  928. void **this_cache, int prev_regnum)
  929. {
  930. struct aarch64_prologue_cache *cache
  931. = aarch64_make_prologue_cache (this_frame, this_cache);
  932. /* If we are asked to unwind the PC, then we need to return the LR
  933. instead. The prologue may save PC, but it will point into this
  934. frame's prologue, not the next frame's resume location. */
  935. if (prev_regnum == AARCH64_PC_REGNUM)
  936. {
  937. CORE_ADDR lr;
  938. struct gdbarch *gdbarch = get_frame_arch (this_frame);
  939. aarch64_gdbarch_tdep *tdep
  940. = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  941. lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
  942. if (tdep->has_pauth ()
  943. && cache->saved_regs[tdep->pauth_ra_state_regnum].is_value ())
  944. lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
  945. return frame_unwind_got_constant (this_frame, prev_regnum, lr);
  946. }
  947. /* SP is generally not saved to the stack, but this frame is
  948. identified by the next frame's stack pointer at the time of the
  949. call. The value was already reconstructed into PREV_SP. */
  950. /*
  951. +----------+ ^
  952. | saved lr | |
  953. +->| saved fp |--+
  954. | | |
  955. | | | <- Previous SP
  956. | +----------+
  957. | | saved lr |
  958. +--| saved fp |<- FP
  959. | |
  960. | |<- SP
  961. +----------+ */
  962. if (prev_regnum == AARCH64_SP_REGNUM)
  963. return frame_unwind_got_constant (this_frame, prev_regnum,
  964. cache->prev_sp);
  965. return trad_frame_get_prev_register (this_frame, cache->saved_regs,
  966. prev_regnum);
  967. }
/* AArch64 prologue unwinder.  Recovers frames by scanning the function
   prologue (see aarch64_analyze_prologue); used when no better unwind
   information is available.  */
static frame_unwind aarch64_prologue_unwind =
{
  "aarch64 prologue",
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
  979. /* Allocate and fill in *THIS_CACHE with information about the prologue of
  980. *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
  981. Return a pointer to the current aarch64_prologue_cache in
  982. *THIS_CACHE. */
  983. static struct aarch64_prologue_cache *
  984. aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
  985. {
  986. struct aarch64_prologue_cache *cache;
  987. if (*this_cache != NULL)
  988. return (struct aarch64_prologue_cache *) *this_cache;
  989. cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  990. cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  991. *this_cache = cache;
  992. try
  993. {
  994. cache->prev_sp = get_frame_register_unsigned (this_frame,
  995. AARCH64_SP_REGNUM);
  996. cache->prev_pc = get_frame_pc (this_frame);
  997. cache->available_p = 1;
  998. }
  999. catch (const gdb_exception_error &ex)
  1000. {
  1001. if (ex.error != NOT_AVAILABLE_ERROR)
  1002. throw;
  1003. }
  1004. return cache;
  1005. }
  1006. /* Implement the "stop_reason" frame_unwind method. */
  1007. static enum unwind_stop_reason
  1008. aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
  1009. void **this_cache)
  1010. {
  1011. struct aarch64_prologue_cache *cache
  1012. = aarch64_make_stub_cache (this_frame, this_cache);
  1013. if (!cache->available_p)
  1014. return UNWIND_UNAVAILABLE;
  1015. return UNWIND_NO_REASON;
  1016. }
  1017. /* Our frame ID for a stub frame is the current SP and LR. */
  1018. static void
  1019. aarch64_stub_this_id (struct frame_info *this_frame,
  1020. void **this_cache, struct frame_id *this_id)
  1021. {
  1022. struct aarch64_prologue_cache *cache
  1023. = aarch64_make_stub_cache (this_frame, this_cache);
  1024. if (cache->available_p)
  1025. *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  1026. else
  1027. *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
  1028. }
  1029. /* Implement the "sniffer" frame_unwind method. */
  1030. static int
  1031. aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
  1032. struct frame_info *this_frame,
  1033. void **this_prologue_cache)
  1034. {
  1035. CORE_ADDR addr_in_block;
  1036. gdb_byte dummy[4];
  1037. addr_in_block = get_frame_address_in_block (this_frame);
  1038. if (in_plt_section (addr_in_block)
  1039. /* We also use the stub winder if the target memory is unreadable
  1040. to avoid having the prologue unwinder trying to read it. */
  1041. || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
  1042. return 1;
  1043. return 0;
  1044. }
/* AArch64 stub unwinder.  Used for PLT stubs and for frames whose code
   is unreadable (see aarch64_stub_unwind_sniffer); registers are
   recovered via aarch64_prologue_prev_register on the stub cache.  */
static frame_unwind aarch64_stub_unwind =
{
  "aarch64 stub",
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};
  1056. /* Return the frame base address of *THIS_FRAME. */
  1057. static CORE_ADDR
  1058. aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
  1059. {
  1060. struct aarch64_prologue_cache *cache
  1061. = aarch64_make_prologue_cache (this_frame, this_cache);
  1062. return cache->prev_sp - cache->framesize;
  1063. }
/* AArch64 default frame base information.  The same address is used to
   answer all three base queries.  */
static frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
  1072. /* Return the value of the REGNUM register in the previous frame of
  1073. *THIS_FRAME. */
  1074. static struct value *
  1075. aarch64_dwarf2_prev_register (struct frame_info *this_frame,
  1076. void **this_cache, int regnum)
  1077. {
  1078. gdbarch *arch = get_frame_arch (this_frame);
  1079. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (arch);
  1080. CORE_ADDR lr;
  1081. switch (regnum)
  1082. {
  1083. case AARCH64_PC_REGNUM:
  1084. lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
  1085. lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
  1086. return frame_unwind_got_constant (this_frame, regnum, lr);
  1087. default:
  1088. internal_error (__FILE__, __LINE__,
  1089. _("Unexpected register %d"), regnum);
  1090. }
  1091. }
/* Single-byte DWARF expressions used to record the return-address
   signing state: op_lit0 for "not signed" and op_lit1 for "signed"
   (see aarch64_dwarf2_frame_init_reg and
   aarch64_execute_dwarf_cfa_vendor_op below).  */
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;
  1094. /* Implement the "init_reg" dwarf2_frame_ops method. */
  1095. static void
  1096. aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
  1097. struct dwarf2_frame_state_reg *reg,
  1098. struct frame_info *this_frame)
  1099. {
  1100. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  1101. switch (regnum)
  1102. {
  1103. case AARCH64_PC_REGNUM:
  1104. reg->how = DWARF2_FRAME_REG_FN;
  1105. reg->loc.fn = aarch64_dwarf2_prev_register;
  1106. return;
  1107. case AARCH64_SP_REGNUM:
  1108. reg->how = DWARF2_FRAME_REG_CFA;
  1109. return;
  1110. }
  1111. /* Init pauth registers. */
  1112. if (tdep->has_pauth ())
  1113. {
  1114. if (regnum == tdep->pauth_ra_state_regnum)
  1115. {
  1116. /* Initialize RA_STATE to zero. */
  1117. reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
  1118. reg->loc.exp.start = &op_lit0;
  1119. reg->loc.exp.len = 1;
  1120. return;
  1121. }
  1122. else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
  1123. || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
  1124. {
  1125. reg->how = DWARF2_FRAME_REG_SAME_VALUE;
  1126. return;
  1127. }
  1128. }
  1129. }
/* Implement the execute_dwarf_cfa_vendor_op method.  Returns true when
   OP was a vendor opcode handled here, false to let the caller process
   it.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  The state is
	 represented by which one-byte DWARF expression (op_lit0 or
	 op_lit1) the column points at; an unset (nullptr) column is
	 treated as state 0.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
/* Used for matching BRK instructions for AArch64.  The mask clears the
   16-bit immediate field, so any BRK #imm16 variant matches the base
   pattern.  */
static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
  1160. /* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */
  1161. static bool
  1162. aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
  1163. {
  1164. const uint32_t insn_len = 4;
  1165. gdb_byte target_mem[4];
  1166. /* Enable the automatic memory restoration from breakpoints while
  1167. we read the memory. Otherwise we may find temporary breakpoints, ones
  1168. inserted by GDB, and flag them as permanent breakpoints. */
  1169. scoped_restore restore_memory
  1170. = make_scoped_restore_show_memory_breakpoints (0);
  1171. if (target_read_memory (address, target_mem, insn_len) == 0)
  1172. {
  1173. uint32_t insn =
  1174. (uint32_t) extract_unsigned_integer (target_mem, insn_len,
  1175. gdbarch_byte_order_for_code (gdbarch));
  1176. /* Check if INSN is a BRK instruction pattern. There are multiple choices
  1177. of such instructions with different immediate values. Different OS'
  1178. may use a different variation, but they have the same outcome. */
  1179. return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
  1180. }
  1181. return false;
  1182. }
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};
  1193. /* Implement the gdbarch type alignment method, overrides the generic
  1194. alignment algorithm for anything that is aarch64 specific. */
  1195. static ULONGEST
  1196. aarch64_type_align (gdbarch *gdbarch, struct type *t)
  1197. {
  1198. t = check_typedef (t);
  1199. if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
  1200. {
  1201. /* Use the natural alignment for vector types (the same for
  1202. scalar type), but the maximum alignment is 128-bit. */
  1203. if (TYPE_LENGTH (t) > 16)
  1204. return 16;
  1205. else
  1206. return TYPE_LENGTH (t);
  1207. }
  1208. /* Allow the common code to calculate the alignment. */
  1209. return 0;
  1210. }
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of register required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (type->code ())
    {
    case TYPE_CODE_FLT:
      /* A scalar float is a base element, provided it is no larger
	 than 16 bytes.  */
      if (TYPE_LENGTH (type) > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
	       || type->code () != (*fundamental_type)->code ())
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	/* A complex value counts as two base elements of its
	   component type.  */
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
	if (TYPE_LENGTH (target_type) > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
		 || target_type->code () != (*fundamental_type)->code ())
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (type->is_vector ())
	  {
	    /* Short vectors are themselves base elements, and must be
	       exactly 64 or 128 bits wide.  */
	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
		     || type->code () != (*fundamental_type)->code ())
	      return -1;

	    return 1;
	  }
	else
	  {
	    /* An ordinary array contributes the element's register
	       count once per array element.  */
	    struct type *target_type = TYPE_TARGET_TYPE (type);
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	/* Sum the counts of all non-static members; any non-candidate
	   member disqualifies the whole aggregate.  */
	int count = 0;

	for (int i = 0; i < type->num_fields (); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&type->field (i)))
	      continue;

	    struct type *member = check_typedef (type->field (i).type ());

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : TYPE_LENGTH (*fundamental_type);
	if (count * ftype_length != TYPE_LENGTH (type))
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}
  1298. /* Return true if an argument, whose type is described by TYPE, can be passed or
  1299. returned in simd/fp registers, providing enough parameter passing registers
  1300. are available. This is as described in the AAPCS64.
  1301. Upon successful return, *COUNT returns the number of needed registers,
  1302. *FUNDAMENTAL_TYPE contains the type of those registers.
  1303. Candidate as per the AAPCS64 5.4.2.C is either a:
  1304. - float.
  1305. - short-vector.
  1306. - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
  1307. all the members are floats and has at most 4 members.
  1308. - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
  1309. all the members are short vectors and has at most 4 members.
  1310. - Complex (7.1.1)
  1311. Note that HFAs and HVAs can include nested structures and arrays. */
  1312. static bool
  1313. aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
  1314. struct type **fundamental_type)
  1315. {
  1316. if (type == nullptr)
  1317. return false;
  1318. *fundamental_type = nullptr;
  1319. int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
  1320. fundamental_type);
  1321. if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
  1322. {
  1323. *count = ag_count;
  1324. return true;
  1325. }
  1326. else
  1327. return false;
  1328. }
/* AArch64 function call information structure.  Tracks register and
   stack usage while marshalling arguments for an inferior call.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  Items are written out in reverse order once
     all arguments have been processed.  */
  std::vector<stack_item_t> si;
};
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.
   Note INFO->NGRN is not advanced here; the caller does that.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = type->code ();
  /* First X register to use.  */
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg).data ();

  info->argnum++;

  while (len > 0)
    {
      /* One register's worth (or the remainder) per iteration.  */
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      /* Zero-fill so the unused high bytes of the register are
	 well-defined.  */
      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      aarch64_debug_printf ("arg %d in %s", info->argnum,
			    gdbarch_register_name (gdbarch, regnum));

      return 1;
    }
  /* Mark the V registers as fully exhausted.  */
  info->nsrn = 8;
  return 0;
}
/* Marshall an argument onto the stack.  The argument's bytes are
   queued in INFO->SI and written to memory later, in reverse order;
   INFO->NSAA tracks the running stacked-argument offset.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg).data ();
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
			info->nsaa);

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  A NULL data pointer marks the
	 item as padding (no bytes are written for it).  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}
  1441. /* Marshall an argument into a sequence of one or more consecutive X
  1442. registers or, if insufficient X registers are available then onto
  1443. the stack. */
  1444. static void
  1445. pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
  1446. struct aarch64_call_info *info, struct type *type,
  1447. struct value *arg)
  1448. {
  1449. int len = TYPE_LENGTH (type);
  1450. int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
  1451. /* PCS C.13 - Pass in registers if we have enough spare */
  1452. if (info->ngrn + nregs <= 8)
  1453. {
  1454. pass_in_x (gdbarch, regcache, info, type, arg);
  1455. info->ngrn += nregs;
  1456. }
  1457. else
  1458. {
  1459. info->ngrn = 8;
  1460. pass_on_stack (info, type, arg);
  1461. }
  1462. }
/* Pass a value, which is of type arg_type, in a V register.  Assumes value is a
   aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will have
   been partially passed to the stack.  */

static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (arg_type->code ())
    {
    case TYPE_CODE_FLT:
      /* A scalar float occupies a single V register.  */
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			value_contents (arg).data ());
      break;

    case TYPE_CODE_COMPLEX:
      {
	/* Real part then imaginary part, one V register each.  */
	const bfd_byte *buf = value_contents (arg).data ();
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

	if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			  buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      /* A short vector goes into a single V register; ordinary arrays
	 fall through and are decomposed field by field.  */
      if (arg_type->is_vector ())
	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			  value_contents (arg).data ());
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* Recurse into each member (HFA/HVA), one V register per base
	 element.  */
      for (int i = 0; i < arg_type->num_fields (); i++)
	{
	  /* Don't include static fields.  */
	  if (field_is_static (&arg_type->field (i)))
	    continue;

	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
	  struct type *field_type = check_typedef (value_type (field));

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}
/* Implement the "push_dummy_call" gdbarch method.  Marshall NARGS
   arguments from ARGS into registers and stack per the AAPCS64, set
   the return address to BP_ADDR, and return the final (16-byte
   aligned) stack pointer.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      aarch64_debug_printf ("struct return in %s = 0x%s",
			    gdbarch_register_name
			      (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
			    paddress (gdbarch, struct_addr));

      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
	 if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
						 &fundamental_type))
	{
	  if (info.nsrn + elements <= 8)
	    {
	      /* We know that we have sufficient registers available therefore
		 this will never need to fallback to the stack.  */
	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
					    arg))
		gdb_assert_not_reached ("Failed to push args");
	    }
	  else
	    {
	      /* V registers exhausted; this argument (and the register
		 bank) goes to the stack.  */
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  continue;
	}

      switch (arg_type->code ())
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4 && !is_fixed_point_type (arg_type))
	    {
	      /* Promote to 32 bit integer.  */
	      if (arg_type->is_unsigned ())
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg).data (), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  /* Write out the queued stack items in reverse order; NULL data marks
     padding, for which no bytes are written.  */
  while (!info.si.empty ())
    {
      const stack_item_t &si = info.si.back ();

      sp -= si.len;
      if (si.data != NULL)
	write_memory (sp, si.data, si.len);
      info.si.pop_back ();
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
  1642. /* Implement the "frame_align" gdbarch method. */
  1643. static CORE_ADDR
  1644. aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
  1645. {
  1646. /* Align the stack to sixteen bytes. */
  1647. return sp & ~(CORE_ADDR) 15;
  1648. }
  1649. /* Return the type for an AdvSISD Q register. */
  1650. static struct type *
  1651. aarch64_vnq_type (struct gdbarch *gdbarch)
  1652. {
  1653. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  1654. if (tdep->vnq_type == NULL)
  1655. {
  1656. struct type *t;
  1657. struct type *elem;
  1658. t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
  1659. TYPE_CODE_UNION);
  1660. elem = builtin_type (gdbarch)->builtin_uint128;
  1661. append_composite_type_field (t, "u", elem);
  1662. elem = builtin_type (gdbarch)->builtin_int128;
  1663. append_composite_type_field (t, "s", elem);
  1664. tdep->vnq_type = t;
  1665. }
  1666. return tdep->vnq_type;
  1667. }
  1668. /* Return the type for an AdvSISD D register. */
  1669. static struct type *
  1670. aarch64_vnd_type (struct gdbarch *gdbarch)
  1671. {
  1672. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  1673. if (tdep->vnd_type == NULL)
  1674. {
  1675. struct type *t;
  1676. struct type *elem;
  1677. t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
  1678. TYPE_CODE_UNION);
  1679. elem = builtin_type (gdbarch)->builtin_double;
  1680. append_composite_type_field (t, "f", elem);
  1681. elem = builtin_type (gdbarch)->builtin_uint64;
  1682. append_composite_type_field (t, "u", elem);
  1683. elem = builtin_type (gdbarch)->builtin_int64;
  1684. append_composite_type_field (t, "s", elem);
  1685. tdep->vnd_type = t;
  1686. }
  1687. return tdep->vnd_type;
  1688. }
  1689. /* Return the type for an AdvSISD S register. */
  1690. static struct type *
  1691. aarch64_vns_type (struct gdbarch *gdbarch)
  1692. {
  1693. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  1694. if (tdep->vns_type == NULL)
  1695. {
  1696. struct type *t;
  1697. struct type *elem;
  1698. t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
  1699. TYPE_CODE_UNION);
  1700. elem = builtin_type (gdbarch)->builtin_float;
  1701. append_composite_type_field (t, "f", elem);
  1702. elem = builtin_type (gdbarch)->builtin_uint32;
  1703. append_composite_type_field (t, "u", elem);
  1704. elem = builtin_type (gdbarch)->builtin_int32;
  1705. append_composite_type_field (t, "s", elem);
  1706. tdep->vns_type = t;
  1707. }
  1708. return tdep->vns_type;
  1709. }
  1710. /* Return the type for an AdvSISD H register. */
  1711. static struct type *
  1712. aarch64_vnh_type (struct gdbarch *gdbarch)
  1713. {
  1714. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  1715. if (tdep->vnh_type == NULL)
  1716. {
  1717. struct type *t;
  1718. struct type *elem;
  1719. t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
  1720. TYPE_CODE_UNION);
  1721. elem = builtin_type (gdbarch)->builtin_bfloat16;
  1722. append_composite_type_field (t, "bf", elem);
  1723. elem = builtin_type (gdbarch)->builtin_half;
  1724. append_composite_type_field (t, "f", elem);
  1725. elem = builtin_type (gdbarch)->builtin_uint16;
  1726. append_composite_type_field (t, "u", elem);
  1727. elem = builtin_type (gdbarch)->builtin_int16;
  1728. append_composite_type_field (t, "s", elem);
  1729. tdep->vnh_type = t;
  1730. }
  1731. return tdep->vnh_type;
  1732. }
  1733. /* Return the type for an AdvSISD B register. */
  1734. static struct type *
  1735. aarch64_vnb_type (struct gdbarch *gdbarch)
  1736. {
  1737. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  1738. if (tdep->vnb_type == NULL)
  1739. {
  1740. struct type *t;
  1741. struct type *elem;
  1742. t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
  1743. TYPE_CODE_UNION);
  1744. elem = builtin_type (gdbarch)->builtin_uint8;
  1745. append_composite_type_field (t, "u", elem);
  1746. elem = builtin_type (gdbarch)->builtin_int8;
  1747. append_composite_type_field (t, "s", elem);
  1748. tdep->vnb_type = t;
  1749. }
  1750. return tdep->vnb_type;
  1751. }
/* Return the type for an AdvSISD V register, built lazily and cached
   in the per-gdbarch tdep.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
	 slice from the non-pseudo vector registers.  However NEON V registers
	 are always vector registers, and need constructing as such.  */
      const struct builtin_type *bt = builtin_type (gdbarch);

      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
					    TYPE_CODE_UNION);

      /* "d": two 64-bit lanes (double/uint64/int64).  */
      struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
					      TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint64, 2));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "d", sub);

      /* "s": four 32-bit lanes (float/uint32/int32).  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_float, 4));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint32, 4));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "s", sub);

      /* "h": eight 16-bit lanes (bfloat16/half/uint16/int16).  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "bf",
				   init_vector_type (bt->builtin_bfloat16, 8));
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_half, 8));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint16, 8));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "h", sub);

      /* "b": sixteen 8-bit lanes (uint8/int8).  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint8, 16));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "b", sub);

      /* "q": one 128-bit lane (uint128/int128).  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint128, 1));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int128, 1));
      append_composite_type_field (t, "q", sub);

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}
  1812. /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
  1813. static int
  1814. aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
  1815. {
  1816. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  1817. if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
  1818. return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
  1819. if (reg == AARCH64_DWARF_SP)
  1820. return AARCH64_SP_REGNUM;
  1821. if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
  1822. return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
  1823. if (reg == AARCH64_DWARF_SVE_VG)
  1824. return AARCH64_SVE_VG_REGNUM;
  1825. if (reg == AARCH64_DWARF_SVE_FFR)
  1826. return AARCH64_SVE_FFR_REGNUM;
  1827. if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
  1828. return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
  1829. if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
  1830. return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
  1831. if (tdep->has_pauth ())
  1832. {
  1833. if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
  1834. return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
  1835. if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
  1836. return tdep->pauth_ra_state_regnum;
  1837. }
  1838. return -1;
  1839. }
  1840. /* Implement the "print_insn" gdbarch method. */
  1841. static int
  1842. aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
  1843. {
  1844. info->symbols = NULL;
  1845. return default_print_insn (memaddr, info);
  1846. }
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian, so the byte array
   below is the little-endian encoding of:
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* HFA/HVA result: one base element per V register, v0 up.  */
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte buf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (buf));

	  aarch64_debug_printf
	    ("read HFA or HVA return value element %d from %s",
	     i + 1, gdbarch_register_name (gdbarch, regno));

	  regs->cooked_read (regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
  1923. /* Will a function return an aggregate type in memory or in a
  1924. register? Return 0 if an aggregate type can be returned in a
  1925. register, 1 if it must be returned in memory. */
  1926. static int
  1927. aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
  1928. {
  1929. type = check_typedef (type);
  1930. int elements;
  1931. struct type *fundamental_type;
  1932. if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
  1933. &fundamental_type))
  1934. {
  1935. /* v0-v7 are used to return values and one register is allocated
  1936. for one member. However, HFA or HVA has at most four members. */
  1937. return 0;
  1938. }
  1939. if (TYPE_LENGTH (type) > 16
  1940. || !language_pass_by_reference (type).trivially_copyable)
  1941. {
  1942. /* PCS B.6 Aggregates larger than 16 bytes are passed by
  1943. invisible reference. */
  1944. return 1;
  1945. }
  1946. return 0;
  1947. }
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* HFA/HVA result: one base element per V register, v0 up.  */
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte tmpbuf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (tmpbuf));

	  aarch64_debug_printf
	    ("write HFA or HVA return value element %d to %s",
	     i + 1, gdbarch_register_name (gdbarch, regno));

	  memcpy (tmpbuf, valbuf,
		  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
	  regs->cooked_write (regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (type->code () == TYPE_CODE_INT
	   || type->code () == TYPE_CODE_CHAR
	   || type->code () == TYPE_CODE_BOOL
	   || type->code () == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || type->code () == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  unpack_long performs the extension
	     according to TYPE's signedness.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the regiser size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regs->cooked_write (regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regs->cooked_write (regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
  2026. /* Implement the "return_value" gdbarch method. */
  2027. static enum return_value_convention
  2028. aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
  2029. struct type *valtype, struct regcache *regcache,
  2030. gdb_byte *readbuf, const gdb_byte *writebuf)
  2031. {
  2032. if (valtype->code () == TYPE_CODE_STRUCT
  2033. || valtype->code () == TYPE_CODE_UNION
  2034. || valtype->code () == TYPE_CODE_ARRAY)
  2035. {
  2036. if (aarch64_return_in_memory (gdbarch, valtype))
  2037. {
  2038. /* From the AAPCS64's Result Return section:
  2039. "Otherwise, the caller shall reserve a block of memory of
  2040. sufficient size and alignment to hold the result. The address
  2041. of the memory block shall be passed as an additional argument to
  2042. the function in x8. */
  2043. aarch64_debug_printf ("return value in memory");
  2044. if (readbuf)
  2045. {
  2046. CORE_ADDR addr;
  2047. regcache->cooked_read (AARCH64_STRUCT_RETURN_REGNUM, &addr);
  2048. read_memory (addr, readbuf, TYPE_LENGTH (valtype));
  2049. }
  2050. return RETURN_VALUE_ABI_RETURNS_ADDRESS;
  2051. }
  2052. }
  2053. if (writebuf)
  2054. aarch64_store_return_value (valtype, regcache, writebuf);
  2055. if (readbuf)
  2056. aarch64_extract_return_value (valtype, regcache, readbuf);
  2057. aarch64_debug_printf ("return value in registers");
  2058. return RETURN_VALUE_REGISTER_CONVENTION;
  2059. }
  2060. /* Implement the "get_longjmp_target" gdbarch method. */
  2061. static int
  2062. aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
  2063. {
  2064. CORE_ADDR jb_addr;
  2065. gdb_byte buf[X_REGISTER_SIZE];
  2066. struct gdbarch *gdbarch = get_frame_arch (frame);
  2067. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  2068. enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  2069. jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
  2070. if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
  2071. X_REGISTER_SIZE))
  2072. return 0;
  2073. *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  2074. return 1;
  2075. }
  2076. /* Implement the "gen_return_address" gdbarch method. */
  2077. static void
  2078. aarch64_gen_return_address (struct gdbarch *gdbarch,
  2079. struct agent_expr *ax, struct axs_value *value,
  2080. CORE_ADDR scope)
  2081. {
  2082. value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  2083. value->kind = axs_lvalue_register;
  2084. value->u.reg = AARCH64_LR_REGNUM;
  2085. }
/* Return the pseudo register name corresponding to register regnum.

   The pseudo registers are 32-register banks of narrower views onto
   the V registers (Q/D/S/H/B), plus, on SVE targets, the "v" alias
   bank, plus the unnamed pauth RA_STATE register.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);

  /* 128-bit quadword views.  */
  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  /* 64-bit doubleword views.  */
  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  /* 32-bit single-word views.  */
  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  /* 16-bit halfword views.  */
  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  /* 8-bit byte views.  */
  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  /* Convert to a zero-based pseudo register index.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[p_regnum - AARCH64_Q0_REGNUM];

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return d_name[p_regnum - AARCH64_D0_REGNUM];

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return s_name[p_regnum - AARCH64_S0_REGNUM];

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return h_name[p_regnum - AARCH64_H0_REGNUM];

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return b_name[p_regnum - AARCH64_B0_REGNUM];

  if (tdep->has_sve ())
    {
      /* On SVE targets the "v" names alias the low 128 bits of the Z
	 registers.  */
      static const char *const sve_v_name[] =
	{
	  "v0", "v1", "v2", "v3",
	  "v4", "v5", "v6", "v7",
	  "v8", "v9", "v10", "v11",
	  "v12", "v13", "v14", "v15",
	  "v16", "v17", "v18", "v19",
	  "v20", "v21", "v22", "v23",
	  "v24", "v25", "v26", "v27",
	  "v28", "v29", "v30", "v31",
	};

      if (p_regnum >= AARCH64_SVE_V0_REGNUM
	  && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
	return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
    }

  /* RA_STATE is used for unwinding only.  Do not assign it a name - this
     prevents it from being read by methods such as
     mi_cmd_trace_frame_collected.  */
  if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
    return "";

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_name: bad register number %d"),
		  p_regnum);
}
  2183. /* Implement the "pseudo_register_type" tdesc_arch_data method. */
  2184. static struct type *
  2185. aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
  2186. {
  2187. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  2188. int p_regnum = regnum - gdbarch_num_regs (gdbarch);
  2189. if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
  2190. return aarch64_vnq_type (gdbarch);
  2191. if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
  2192. return aarch64_vnd_type (gdbarch);
  2193. if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
  2194. return aarch64_vns_type (gdbarch);
  2195. if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
  2196. return aarch64_vnh_type (gdbarch);
  2197. if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
  2198. return aarch64_vnb_type (gdbarch);
  2199. if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
  2200. && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
  2201. return aarch64_vnv_type (gdbarch);
  2202. if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
  2203. return builtin_type (gdbarch)->builtin_uint64;
  2204. internal_error (__FILE__, __LINE__,
  2205. _("aarch64_pseudo_register_type: bad register number %d"),
  2206. p_regnum);
  2207. }
  2208. /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
  2209. static int
  2210. aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
  2211. const struct reggroup *group)
  2212. {
  2213. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  2214. int p_regnum = regnum - gdbarch_num_regs (gdbarch);
  2215. if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
  2216. return group == all_reggroup || group == vector_reggroup;
  2217. else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
  2218. return (group == all_reggroup || group == vector_reggroup
  2219. || group == float_reggroup);
  2220. else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
  2221. return (group == all_reggroup || group == vector_reggroup
  2222. || group == float_reggroup);
  2223. else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
  2224. return group == all_reggroup || group == vector_reggroup;
  2225. else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
  2226. return group == all_reggroup || group == vector_reggroup;
  2227. else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
  2228. && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
  2229. return group == all_reggroup || group == vector_reggroup;
  2230. /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
  2231. if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
  2232. return 0;
  2233. return group == all_reggroup;
  2234. }
  2235. /* Helper for aarch64_pseudo_read_value. */
  2236. static struct value *
  2237. aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
  2238. readable_regcache *regcache, int regnum_offset,
  2239. int regsize, struct value *result_value)
  2240. {
  2241. unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
  2242. /* Enough space for a full vector register. */
  2243. gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
  2244. gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
  2245. if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
  2246. mark_value_bytes_unavailable (result_value, 0,
  2247. TYPE_LENGTH (value_type (result_value)));
  2248. else
  2249. memcpy (value_contents_raw (result_value).data (), reg_buf, regsize);
  2250. return result_value;
  2251. }
  2252. /* Implement the "pseudo_register_read_value" gdbarch method. */
  2253. static struct value *
  2254. aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
  2255. int regnum)
  2256. {
  2257. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  2258. struct value *result_value = allocate_value (register_type (gdbarch, regnum));
  2259. VALUE_LVAL (result_value) = lval_register;
  2260. VALUE_REGNUM (result_value) = regnum;
  2261. regnum -= gdbarch_num_regs (gdbarch);
  2262. if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
  2263. return aarch64_pseudo_read_value_1 (gdbarch, regcache,
  2264. regnum - AARCH64_Q0_REGNUM,
  2265. Q_REGISTER_SIZE, result_value);
  2266. if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
  2267. return aarch64_pseudo_read_value_1 (gdbarch, regcache,
  2268. regnum - AARCH64_D0_REGNUM,
  2269. D_REGISTER_SIZE, result_value);
  2270. if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
  2271. return aarch64_pseudo_read_value_1 (gdbarch, regcache,
  2272. regnum - AARCH64_S0_REGNUM,
  2273. S_REGISTER_SIZE, result_value);
  2274. if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
  2275. return aarch64_pseudo_read_value_1 (gdbarch, regcache,
  2276. regnum - AARCH64_H0_REGNUM,
  2277. H_REGISTER_SIZE, result_value);
  2278. if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
  2279. return aarch64_pseudo_read_value_1 (gdbarch, regcache,
  2280. regnum - AARCH64_B0_REGNUM,
  2281. B_REGISTER_SIZE, result_value);
  2282. if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
  2283. && regnum < AARCH64_SVE_V0_REGNUM + 32)
  2284. return aarch64_pseudo_read_value_1 (gdbarch, regcache,
  2285. regnum - AARCH64_SVE_V0_REGNUM,
  2286. V_REGISTER_SIZE, result_value);
  2287. gdb_assert_not_reached ("regnum out of bound");
  2288. }
  2289. /* Helper for aarch64_pseudo_write. */
  2290. static void
  2291. aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
  2292. int regnum_offset, int regsize, const gdb_byte *buf)
  2293. {
  2294. unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
  2295. /* Enough space for a full vector register. */
  2296. gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
  2297. gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
  2298. /* Ensure the register buffer is zero, we want gdb writes of the
  2299. various 'scalar' pseudo registers to behavior like architectural
  2300. writes, register width bytes are written the remainder are set to
  2301. zero. */
  2302. memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
  2303. memcpy (reg_buf, buf, regsize);
  2304. regcache->raw_write (v_regnum, reg_buf);
  2305. }
  2306. /* Implement the "pseudo_register_write" gdbarch method. */
  2307. static void
  2308. aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
  2309. int regnum, const gdb_byte *buf)
  2310. {
  2311. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  2312. regnum -= gdbarch_num_regs (gdbarch);
  2313. if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
  2314. return aarch64_pseudo_write_1 (gdbarch, regcache,
  2315. regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
  2316. buf);
  2317. if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
  2318. return aarch64_pseudo_write_1 (gdbarch, regcache,
  2319. regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
  2320. buf);
  2321. if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
  2322. return aarch64_pseudo_write_1 (gdbarch, regcache,
  2323. regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
  2324. buf);
  2325. if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
  2326. return aarch64_pseudo_write_1 (gdbarch, regcache,
  2327. regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
  2328. buf);
  2329. if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
  2330. return aarch64_pseudo_write_1 (gdbarch, regcache,
  2331. regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
  2332. buf);
  2333. if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
  2334. && regnum < AARCH64_SVE_V0_REGNUM + 32)
  2335. return aarch64_pseudo_write_1 (gdbarch, regcache,
  2336. regnum - AARCH64_SVE_V0_REGNUM,
  2337. V_REGISTER_SIZE, buf);
  2338. gdb_assert_not_reached ("regnum out of bound");
  2339. }
  2340. /* Callback function for user_reg_add. */
  2341. static struct value *
  2342. value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
  2343. {
  2344. const int *reg_p = (const int *) baton;
  2345. return value_of_register (*reg_p, frame);
  2346. }
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   If PC sits on a Load Exclusive instruction, scan forward for the
   matching Store Exclusive and return breakpoint addresses that step
   over the whole sequence (plus the target of at most one conditional
   branch inside it).  Returns an empty vector when no atomic sequence
   is recognised, letting the caller fall back to normal stepping.  */

static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence;
     bit 22 distinguishes the load form from the store form.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  /* Scan at most ATOMIC_SEQUENCE_LENGTH instructions past the load.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	return {};

      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* Only a single conditional branch is supported within the
	     sequence; bail out on a second one.  */
	  if (bc_insn_count >= 1)
	    return {};

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}
/* Per-step state carried from displaced_step_copy_insn to
   displaced_step_fixup.  */

struct aarch64_displaced_step_copy_insn_closure
  : public displaced_step_copy_insn_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  bool cond = false;

  /* PC adjustment offset after displaced stepping.  If 0, then we don't
     write the PC back, assuming the PC is already the right address.  */
  int32_t pc_adjust = 0;
};
/* Data when visiting instructions for displaced stepping.  Extends
   aarch64_insn_data (which carries the original instruction address)
   with the relocation output and register access.  */

struct aarch64_displaced_step_data
{
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;

  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];

  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;

  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Closure handed back to the fixup phase (owned by the caller).  */
  aarch64_displaced_step_copy_insn_closure *dsc;
};
  2439. /* Implementation of aarch64_insn_visitor method "b". */
  2440. static void
  2441. aarch64_displaced_step_b (const int is_bl, const int32_t offset,
  2442. struct aarch64_insn_data *data)
  2443. {
  2444. struct aarch64_displaced_step_data *dsd
  2445. = (struct aarch64_displaced_step_data *) data;
  2446. int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
  2447. if (can_encode_int32 (new_offset, 28))
  2448. {
  2449. /* Emit B rather than BL, because executing BL on a new address
  2450. will get the wrong address into LR. In order to avoid this,
  2451. we emit B, and update LR if the instruction is BL. */
  2452. emit_b (dsd->insn_buf, 0, new_offset);
  2453. dsd->insn_count++;
  2454. }
  2455. else
  2456. {
  2457. /* Write NOP. */
  2458. emit_nop (dsd->insn_buf);
  2459. dsd->insn_count++;
  2460. dsd->dsc->pc_adjust = offset;
  2461. }
  2462. if (is_bl)
  2463. {
  2464. /* Update LR. */
  2465. regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
  2466. data->insn_addr + 4);
  2467. }
  2468. }
  2469. /* Implementation of aarch64_insn_visitor method "b_cond". */
  2470. static void
  2471. aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
  2472. struct aarch64_insn_data *data)
  2473. {
  2474. struct aarch64_displaced_step_data *dsd
  2475. = (struct aarch64_displaced_step_data *) data;
  2476. /* GDB has to fix up PC after displaced step this instruction
  2477. differently according to the condition is true or false. Instead
  2478. of checking COND against conditional flags, we can use
  2479. the following instructions, and GDB can tell how to fix up PC
  2480. according to the PC value.
  2481. B.COND TAKEN ; If cond is true, then jump to TAKEN.
  2482. INSN1 ;
  2483. TAKEN:
  2484. INSN2
  2485. */
  2486. emit_bcond (dsd->insn_buf, cond, 8);
  2487. dsd->dsc->cond = true;
  2488. dsd->dsc->pc_adjust = offset;
  2489. dsd->insn_count = 1;
  2490. }
  2491. /* Dynamically allocate a new register. If we know the register
  2492. statically, we should make it a global as above instead of using this
  2493. helper function. */
  2494. static struct aarch64_register
  2495. aarch64_register (unsigned num, int is64)
  2496. {
  2497. return (struct aarch64_register) { num, is64 };
  2498. }
  2499. /* Implementation of aarch64_insn_visitor method "cb". */
  2500. static void
  2501. aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
  2502. const unsigned rn, int is64,
  2503. struct aarch64_insn_data *data)
  2504. {
  2505. struct aarch64_displaced_step_data *dsd
  2506. = (struct aarch64_displaced_step_data *) data;
  2507. /* The offset is out of range for a compare and branch
  2508. instruction. We can use the following instructions instead:
  2509. CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
  2510. INSN1 ;
  2511. TAKEN:
  2512. INSN2
  2513. */
  2514. emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
  2515. dsd->insn_count = 1;
  2516. dsd->dsc->cond = true;
  2517. dsd->dsc->pc_adjust = offset;
  2518. }
  2519. /* Implementation of aarch64_insn_visitor method "tb". */
  2520. static void
  2521. aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
  2522. const unsigned rt, unsigned bit,
  2523. struct aarch64_insn_data *data)
  2524. {
  2525. struct aarch64_displaced_step_data *dsd
  2526. = (struct aarch64_displaced_step_data *) data;
  2527. /* The offset is out of range for a test bit and branch
  2528. instruction We can use the following instructions instead:
  2529. TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
  2530. INSN1 ;
  2531. TAKEN:
  2532. INSN2
  2533. */
  2534. emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
  2535. dsd->insn_count = 1;
  2536. dsd->dsc->cond = true;
  2537. dsd->dsc->pc_adjust = offset;
  2538. }
  2539. /* Implementation of aarch64_insn_visitor method "adr". */
  2540. static void
  2541. aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
  2542. const int is_adrp, struct aarch64_insn_data *data)
  2543. {
  2544. struct aarch64_displaced_step_data *dsd
  2545. = (struct aarch64_displaced_step_data *) data;
  2546. /* We know exactly the address the ADR{P,} instruction will compute.
  2547. We can just write it to the destination register. */
  2548. CORE_ADDR address = data->insn_addr + offset;
  2549. if (is_adrp)
  2550. {
  2551. /* Clear the lower 12 bits of the offset to get the 4K page. */
  2552. regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
  2553. address & ~0xfff);
  2554. }
  2555. else
  2556. regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
  2557. address);
  2558. dsd->dsc->pc_adjust = 4;
  2559. emit_nop (dsd->insn_buf);
  2560. dsd->insn_count = 1;
  2561. }
  2562. /* Implementation of aarch64_insn_visitor method "ldr_literal". */
  2563. static void
  2564. aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
  2565. const unsigned rt, const int is64,
  2566. struct aarch64_insn_data *data)
  2567. {
  2568. struct aarch64_displaced_step_data *dsd
  2569. = (struct aarch64_displaced_step_data *) data;
  2570. CORE_ADDR address = data->insn_addr + offset;
  2571. struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
  2572. regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
  2573. address);
  2574. if (is_sw)
  2575. dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
  2576. aarch64_register (rt, 1), zero);
  2577. else
  2578. dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
  2579. aarch64_register (rt, 1), zero);
  2580. dsd->dsc->pc_adjust = 4;
  2581. }
  2582. /* Implementation of aarch64_insn_visitor method "others". */
  2583. static void
  2584. aarch64_displaced_step_others (const uint32_t insn,
  2585. struct aarch64_insn_data *data)
  2586. {
  2587. struct aarch64_displaced_step_data *dsd
  2588. = (struct aarch64_displaced_step_data *) data;
  2589. uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
  2590. if (masked_insn == BLR)
  2591. {
  2592. /* Emit a BR to the same register and then update LR to the original
  2593. address (similar to aarch64_displaced_step_b). */
  2594. aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
  2595. regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
  2596. data->insn_addr + 4);
  2597. }
  2598. else
  2599. aarch64_emit_insn (dsd->insn_buf, insn);
  2600. dsd->insn_count = 1;
  2601. if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
  2602. dsd->dsc->pc_adjust = 0;
  2603. else
  2604. dsd->dsc->pc_adjust = 4;
  2605. }
/* Visitor wiring the displaced-stepping callbacks above into the
   generic AArch64 instruction relocator.  The initializer is
   positional, so the entries must stay in the aarch64_insn_visitor
   field order: b, b_cond, cb, tb, adr, ldr_literal, others.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
/* Implement the "displaced_step_copy_insn" gdbarch method.

   Relocate the instruction at FROM into the scratch pad at TO, using
   the visitor above to rewrite PC-relative forms.  Returns the closure
   the fixup phase needs, or NULL when displaced stepping cannot be
   used for this instruction (undecodable, or an atomic sequence).  */

displaced_step_copy_insn_closure_up
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
    (new aarch64_displaced_step_copy_insn_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc.get ();
  dsd.insn_count = 0;
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  displaced_debug_printf ("writing insn %.8x at %s",
				  dsd.insn_buf[i],
				  paddress (gdbarch, to + i * 4));

	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      /* The visitor produced nothing to execute; report failure by
	 returning a null closure.  */
      dsc = NULL;
    }

  /* This is a work around for a problem with g++ 4.8.  */
  return displaced_step_copy_insn_closure_up (dsc.release ());
}
/* Implement the "displaced_step_fixup" gdbarch method.

   After the relocated copy at TO has executed, repair the PC.  For
   conditional instructions the branch outcome is deduced from how far
   the PC moved inside the scratch pad (see the B.COND/CBZ/TBZ
   relocation scheme above, which places the "taken" target 8 bytes
   in).  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_copy_insn_closure *dsc_,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs)
{
  aarch64_displaced_step_copy_insn_closure *dsc
    = (aarch64_displaced_step_copy_insn_closure *) dsc_;

  ULONGEST pc;

  regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);

  displaced_debug_printf ("PC after stepping: %s (was %s).",
			  paddress (gdbarch, pc), paddress (gdbarch, to));

  if (dsc->cond)
    {
      displaced_debug_printf ("[Conditional] pc_adjust before: %d",
			      dsc->pc_adjust);

      if (pc - to == 8)
	{
	  /* Condition is true.  PC_ADJUST already holds the branch
	     offset recorded by the relocation visitor.  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  Fall through to the next original
	     instruction instead of the branch target.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");

      displaced_debug_printf ("[Conditional] pc_adjust after: %d",
			      dsc->pc_adjust);
    }

  displaced_debug_printf ("%s PC by %d",
			  dsc->pc_adjust ? "adjusting" : "not adjusting",
			  dsc->pc_adjust);

  if (dsc->pc_adjust != 0)
    {
      /* Make sure the previous instruction was executed (that is, the PC
	 has changed).  If the PC didn't change, then discard the adjustment
	 offset.  Otherwise we may skip an instruction before its execution
	 took place.  */
      if ((pc - to) == 0)
	{
	  displaced_debug_printf ("PC did not move.  Discarding PC adjustment.");
	  dsc->pc_adjust = 0;
	}

      displaced_debug_printf ("fixup: set PC to %s:%d",
			      paddress (gdbarch, from), dsc->pc_adjust);

      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   Unconditionally true: hardware single-step is used to execute the
   scratch-pad copy.  */

bool
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
{
  return true;
}
  2722. /* Get the correct target description for the given VQ value.
  2723. If VQ is zero then it is assumed SVE is not supported.
  2724. (It is not possible to set VQ to zero on an SVE system).
  2725. MTE_P indicates the presence of the Memory Tagging Extension feature. */
  2726. const target_desc *
  2727. aarch64_read_description (uint64_t vq, bool pauth_p, bool mte_p)
  2728. {
  2729. if (vq > AARCH64_MAX_SVE_VQ)
  2730. error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
  2731. AARCH64_MAX_SVE_VQ);
  2732. struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p][mte_p];
  2733. if (tdesc == NULL)
  2734. {
  2735. tdesc = aarch64_create_target_description (vq, pauth_p, mte_p);
  2736. tdesc_aarch64_list[vq][pauth_p][mte_p] = tdesc;
  2737. }
  2738. return tdesc;
  2739. }
  2740. /* Return the VQ used when creating the target description TDESC. */
  2741. static uint64_t
  2742. aarch64_get_tdesc_vq (const struct target_desc *tdesc)
  2743. {
  2744. const struct tdesc_feature *feature_sve;
  2745. if (!tdesc_has_registers (tdesc))
  2746. return 0;
  2747. feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
  2748. if (feature_sve == nullptr)
  2749. return 0;
  2750. uint64_t vl = tdesc_register_bitsize (feature_sve,
  2751. aarch64_sve_register_names[0]) / 8;
  2752. return sve_vq_from_vl (vl);
  2753. }
  2754. /* Implement the "cannot_store_register" gdbarch method. */
  2755. static int
  2756. aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
  2757. {
  2758. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  2759. if (!tdep->has_pauth ())
  2760. return 0;
  2761. /* Pointer authentication registers are read-only. */
  2762. return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
  2763. || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
  2764. }
  2765. /* Implement the stack_frame_destroyed_p gdbarch method. */
  2766. static int
  2767. aarch64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
  2768. {
  2769. CORE_ADDR func_start, func_end;
  2770. if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
  2771. return 0;
  2772. enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  2773. uint32_t insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
  2774. aarch64_inst inst;
  2775. if (aarch64_decode_insn (insn, &inst, 1, nullptr) != 0)
  2776. return 0;
  2777. return streq (inst.opcode->name, "ret");
  2778. }
/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */
static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
  const struct tdesc_feature *feature_pauth;
  bool valid_p = true;
  int i, num_regs = 0, num_pseudo_regs = 0;
  int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
  int first_mte_regnum = -1;

  /* Use the vector length passed via the target info.  Here -1 is used for no
     SVE, and 0 is unset.  If unset then use the vector length from the existing
     tdesc.  */
  uint64_t vq = 0;
  if (info.id == (int *) -1)
    vq = 0;
  else if (info.id != 0)
    vq = (uint64_t) info.id;
  else
    vq = aarch64_get_tdesc_vq (info.target_desc);

  if (vq > AARCH64_MAX_SVE_VQ)
    internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
		    pulongest (vq), AARCH64_MAX_SVE_VQ);

  /* If there is already a candidate, use it.  An existing architecture
     is only reusable when its vector length matches the one requested
     here, so compare tdep->vq as well.  */
  for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != nullptr;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      aarch64_gdbarch_tdep *tdep
	= (aarch64_gdbarch_tdep *) gdbarch_tdep (best_arch->gdbarch);
      if (tdep && tdep->vq == vq)
	return best_arch->gdbarch;
    }

  /* Ensure we always have a target descriptor, and that it is for the given VQ
     value.  */
  const struct target_desc *tdesc = info.target_desc;
  if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
    tdesc = aarch64_read_description (vq, false, false);
  gdb_assert (tdesc);

  /* Look up the optional feature sets in the description.  Only the
     core feature is mandatory (checked below).  */
  feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
  feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
  feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
  const struct tdesc_feature *feature_mte
    = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");

  if (feature_core == nullptr)
    return nullptr;

  tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();

  /* Validate the description provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
					AARCH64_X0_REGNUM + i,
					aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Add the V registers.  */
  if (feature_fpu != nullptr)
    {
      /* fpu and SVE are mutually exclusive in a description: the V
	 registers are pseudos on top of Z when SVE is present.  */
      if (feature_sve != nullptr)
	error (_("Program contains both fpu and SVE features."));

      /* Validate the description provides the mandatory V registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
					    AARCH64_V0_REGNUM + i,
					    aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;
    }

  /* Add the SVE registers.  */
  if (feature_sve != nullptr)
    {
      /* Validate the description provides the mandatory SVE registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
					    AARCH64_SVE_Z0_REGNUM + i,
					    aarch64_sve_register_names[i]);

      num_regs = AARCH64_SVE_Z0_REGNUM + i;
      num_pseudo_regs += 32;	/* add the Vn register pseudos.  */
    }

  if (feature_fpu != nullptr || feature_sve != nullptr)
    {
      num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
    }

  /* Add the pauth registers.  */
  if (feature_pauth != NULL)
    {
      first_pauth_regnum = num_regs;
      pauth_ra_state_offset = num_pseudo_regs;
      /* Validate the descriptor provides the mandatory PAUTH registers and
	 allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
					    first_pauth_regnum + i,
					    aarch64_pauth_register_names[i]);

      num_regs += i;
      num_pseudo_regs += 1;	/* Count RA_STATE pseudo register.  */
    }

  /* Add the MTE registers.  */
  if (feature_mte != NULL)
    {
      first_mte_regnum = num_regs;
      /* Validate the descriptor provides the mandatory MTE registers and
	 allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
					    first_mte_regnum + i,
					    aarch64_mte_register_names[i]);

      num_regs += i;
    }

  if (!valid_p)
    return nullptr;

  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  aarch64_gdbarch_tdep *tdep = new aarch64_gdbarch_tdep;
  struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;
  tdep->vq = vq;
  tdep->pauth_reg_base = first_pauth_regnum;
  /* RA_STATE is a pseudo register: its regnum sits after all the raw
     registers, at the pseudo offset recorded above.  */
  tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
				: pauth_ra_state_offset + num_regs;
  tdep->mte_reg_base = first_mte_regnum;

  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_kind_from_pc (gdbarch,
				       aarch64_breakpoint::kind_from_pc);
  set_gdbarch_sw_breakpoint_from_kind (gdbarch,
				       aarch64_breakpoint::bp_from_kind);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);
  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
					aarch64_pseudo_register_reggroup_p);
  set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);

  /* ABI */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_wchar_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
  set_gdbarch_type_align (gdbarch, aarch64_type_align);

  /* Detect whether PC is at a point where the stack has been destroyed.  */
  set_gdbarch_stack_frame_destroyed_p (gdbarch, aarch64_stack_frame_destroyed_p);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, aarch64_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdesc_data = tdesc_data.get ();
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);

  /* Register DWARF CFA vendor handler.  */
  set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
					   aarch64_execute_dwarf_cfa_vendor_op);

  /* Permanent/Program breakpoint handling.  */
  set_gdbarch_program_breakpoint_here_p (gdbarch,
					 aarch64_program_breakpoint_here_p);

  /* Add some default predicates.  Order matters: the stub unwinder is
     tried first, then DWARF, then the prologue analyzer.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */
  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
  set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);

  tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
		  value_of_aarch64_user_reg,
		  &aarch64_register_aliases[i].regnum);

  register_aarch64_ravenscar_ops (gdbarch);

  return gdbarch;
}
  2993. static void
  2994. aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
  2995. {
  2996. aarch64_gdbarch_tdep *tdep = (aarch64_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  2997. if (tdep == NULL)
  2998. return;
  2999. gdb_printf (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
  3000. paddress (gdbarch, tdep->lowest_pc));
  3001. }
#if GDB_SELF_TEST
namespace selftests
{
/* Forward declaration; the test body is defined later in this file and
   registered in _initialize_aarch64_tdep.  */
static void aarch64_process_record_test (void);
}
#endif
/* Module initializer: register the AArch64 architecture, its debug
   setting, and (when built with self-tests) its unit tests.  */
void _initialize_aarch64_tdep ();
void
_initialize_aarch64_tdep ()
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Debug this file's internals ("set/show debug aarch64").  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			    NULL,
			    show_aarch64_debug,
			    &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  selftests::register_test ("aarch64-analyze-prologue",
			    selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
			    selftests::aarch64_process_record_test);
#endif
}
  3029. /* AArch64 process record-replay related structures, defines etc. */
  3030. #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
  3031. do \
  3032. { \
  3033. unsigned int reg_len = LENGTH; \
  3034. if (reg_len) \
  3035. { \
  3036. REGS = XNEWVEC (uint32_t, reg_len); \
  3037. memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
  3038. } \
  3039. } \
  3040. while (0)
  3041. #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
  3042. do \
  3043. { \
  3044. unsigned int mem_len = LENGTH; \
  3045. if (mem_len) \
  3046. { \
  3047. MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
  3048. memcpy(MEMS, &RECORD_BUF[0], \
  3049. sizeof(struct aarch64_mem_r) * LENGTH); \
  3050. } \
  3051. } \
  3052. while (0)
/* AArch64 record/replay structures and enumerations.  */

/* One memory write to be recorded: ADDR..ADDR+LEN-1 will be saved
   before the instruction is replayed.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
/* Result codes returned by the per-group record handlers below.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,	/* Instruction decoded and recorded.  */
  AARCH64_RECORD_UNSUPPORTED,	/* Recognized but cannot be recorded.  */
  AARCH64_RECORD_UNKNOWN	/* Encoding not recognized.  */
};
/* Working state while decoding a single instruction for record/replay:
   the instruction itself plus the register numbers and memory ranges
   its execution will modify.  */
typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;			/* Address of insn to be recorded.  */
  uint32_t aarch64_insn;		/* Insn to be recorded.  */
  uint32_t mem_rec_count;		/* Count of memory records.  */
  uint32_t reg_rec_count;		/* Count of register records.  */
  uint32_t *aarch64_regs;		/* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;	/* Memory locations to be recorded.  */
} insn_decode_record;
  3076. /* Record handler for data processing - register instructions. */
  3077. static unsigned int
  3078. aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
  3079. {
  3080. uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  3081. uint32_t record_buf[4];
  3082. reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  3083. insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  3084. insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
  3085. if (!bit (aarch64_insn_r->aarch64_insn, 28))
  3086. {
  3087. uint8_t setflags;
  3088. /* Logical (shifted register). */
  3089. if (insn_bits24_27 == 0x0a)
  3090. setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
  3091. /* Add/subtract. */
  3092. else if (insn_bits24_27 == 0x0b)
  3093. setflags = bit (aarch64_insn_r->aarch64_insn, 29);
  3094. else
  3095. return AARCH64_RECORD_UNKNOWN;
  3096. record_buf[0] = reg_rd;
  3097. aarch64_insn_r->reg_rec_count = 1;
  3098. if (setflags)
  3099. record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
  3100. }
  3101. else
  3102. {
  3103. if (insn_bits24_27 == 0x0b)
  3104. {
  3105. /* Data-processing (3 source). */
  3106. record_buf[0] = reg_rd;
  3107. aarch64_insn_r->reg_rec_count = 1;
  3108. }
  3109. else if (insn_bits24_27 == 0x0a)
  3110. {
  3111. if (insn_bits21_23 == 0x00)
  3112. {
  3113. /* Add/subtract (with carry). */
  3114. record_buf[0] = reg_rd;
  3115. aarch64_insn_r->reg_rec_count = 1;
  3116. if (bit (aarch64_insn_r->aarch64_insn, 29))
  3117. {
  3118. record_buf[1] = AARCH64_CPSR_REGNUM;
  3119. aarch64_insn_r->reg_rec_count = 2;
  3120. }
  3121. }
  3122. else if (insn_bits21_23 == 0x02)
  3123. {
  3124. /* Conditional compare (register) and conditional compare
  3125. (immediate) instructions. */
  3126. record_buf[0] = AARCH64_CPSR_REGNUM;
  3127. aarch64_insn_r->reg_rec_count = 1;
  3128. }
  3129. else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
  3130. {
  3131. /* Conditional select. */
  3132. /* Data-processing (2 source). */
  3133. /* Data-processing (1 source). */
  3134. record_buf[0] = reg_rd;
  3135. aarch64_insn_r->reg_rec_count = 1;
  3136. }
  3137. else
  3138. return AARCH64_RECORD_UNKNOWN;
  3139. }
  3140. }
  3141. REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
  3142. record_buf);
  3143. return AARCH64_RECORD_SUCCESS;
  3144. }
  3145. /* Record handler for data processing - immediate instructions. */
  3146. static unsigned int
  3147. aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
  3148. {
  3149. uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
  3150. uint32_t record_buf[4];
  3151. reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  3152. insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  3153. insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  3154. if (insn_bits24_27 == 0x00 /* PC rel addressing. */
  3155. || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
  3156. || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
  3157. {
  3158. record_buf[0] = reg_rd;
  3159. aarch64_insn_r->reg_rec_count = 1;
  3160. }
  3161. else if (insn_bits24_27 == 0x01)
  3162. {
  3163. /* Add/Subtract (immediate). */
  3164. setflags = bit (aarch64_insn_r->aarch64_insn, 29);
  3165. record_buf[0] = reg_rd;
  3166. aarch64_insn_r->reg_rec_count = 1;
  3167. if (setflags)
  3168. record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
  3169. }
  3170. else if (insn_bits24_27 == 0x02 && !insn_bit23)
  3171. {
  3172. /* Logical (immediate). */
  3173. setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
  3174. record_buf[0] = reg_rd;
  3175. aarch64_insn_r->reg_rec_count = 1;
  3176. if (setflags)
  3177. record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
  3178. }
  3179. else
  3180. return AARCH64_RECORD_UNKNOWN;
  3181. REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
  3182. record_buf);
  3183. return AARCH64_RECORD_SUCCESS;
  3184. }
/* Record handler for branch, exception generation and system instructions.

   Branches record the PC (and LR for the linking variants).  SVC is
   delegated to the OS-specific syscall recorder.  */
static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  aarch64_gdbarch_tdep *tdep
    = (aarch64_gdbarch_tdep *) gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  /* Only SVC is supported; the syscall number is read from
	     register 8 (x8 per the AArch64 Linux syscall convention —
	     NOTE(review): confirm for non-Linux OS ABIs).  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions. */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions. */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Other system instructions deliberately record nothing.  */
	}
      /* Unconditional branch (register). */
      else if ((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* The linking variant also writes LR.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate). */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* Bit 31 distinguishes the linking variant, which writes LR.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate). */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
/* Record handler for advanced SIMD load and store instructions.

   Bit 22 distinguishes loads (record the destination V registers) from
   stores (record the memory ranges written).  record_buf_mem holds
   (length, address) pairs, hence mem_rec_count = mem_index / 2.  */
static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* Base address comes from Rn.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure. */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;

      /* SCALE selects the element size; SELEM is the number of
	 structure elements transferred per iteration.  */
      scale = opcode_bits >> 2;
      selem = ((opcode_bits & 0x02) |
	       bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  /* Either a replicating load (LD*R-style) or unknown; this
	     case always breaks or returns, so it never falls through
	     to the default label.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  /* scale == 0 needs no adjustment.  */
	  break;
	}
      esize = 8 << scale;
      if (replicate)
	/* Replicating load: every selected V register is written.  */
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      /* Bit 22 set means load (register written), clear means
		 store (memory written).  */
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure. */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Bit 30 selects the 128-bit (Q) form over the 64-bit (D) form.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      /* RPT is the number of register blocks; SELEM the number of
	 structure elements per block.  */
      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers). */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers). */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers). */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers). */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register). */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers). */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers). */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		/* Bit 22: load records registers, store records
		   memory.  */
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  /* Bit 23 flags writeback of the base register Rn.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
  3415. /* Record handler for load and store instructions. */
  3416. static unsigned int
  3417. aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
  3418. {
  3419. uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  3420. uint8_t insn_bit23, insn_bit21;
  3421. uint8_t opc, size_bits, ld_flag, vector_flag;
  3422. uint32_t reg_rn, reg_rt, reg_rt2;
  3423. uint64_t datasize, offset;
  3424. uint32_t record_buf[8];
  3425. uint64_t record_buf_mem[8];
  3426. CORE_ADDR address;
  3427. insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  3428. insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  3429. insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  3430. insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  3431. insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  3432. ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  3433. vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  3434. reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  3435. reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  3436. reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  3437. size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
  3438. /* Load/store exclusive. */
  3439. if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
  3440. {
  3441. if (record_debug)
  3442. debug_printf ("Process record: load/store exclusive\n");
  3443. if (ld_flag)
  3444. {
  3445. record_buf[0] = reg_rt;
  3446. aarch64_insn_r->reg_rec_count = 1;
  3447. if (insn_bit21)
  3448. {
  3449. record_buf[1] = reg_rt2;
  3450. aarch64_insn_r->reg_rec_count = 2;
  3451. }
  3452. }
  3453. else
  3454. {
  3455. if (insn_bit21)
  3456. datasize = (8 << size_bits) * 2;
  3457. else
  3458. datasize = (8 << size_bits);
  3459. regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
  3460. &address);
  3461. record_buf_mem[0] = datasize / 8;
  3462. record_buf_mem[1] = address;
  3463. aarch64_insn_r->mem_rec_count = 1;
  3464. if (!insn_bit23)
  3465. {
  3466. /* Save register rs. */
  3467. record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
  3468. aarch64_insn_r->reg_rec_count = 1;
  3469. }
  3470. }
  3471. }
  3472. /* Load register (literal) instructions decoding. */
  3473. else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
  3474. {
  3475. if (record_debug)
  3476. debug_printf ("Process record: load register (literal)\n");
  3477. if (vector_flag)
  3478. record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
  3479. else
  3480. record_buf[0] = reg_rt;
  3481. aarch64_insn_r->reg_rec_count = 1;
  3482. }
  3483. /* All types of load/store pair instructions decoding. */
  3484. else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
  3485. {
  3486. if (record_debug)
  3487. debug_printf ("Process record: load/store pair\n");
  3488. if (ld_flag)
  3489. {
  3490. if (vector_flag)
  3491. {
  3492. record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
  3493. record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
  3494. }
  3495. else
  3496. {
  3497. record_buf[0] = reg_rt;
  3498. record_buf[1] = reg_rt2;
  3499. }
  3500. aarch64_insn_r->reg_rec_count = 2;
  3501. }
  3502. else
  3503. {
  3504. uint16_t imm7_off;
  3505. imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
  3506. if (!vector_flag)
  3507. size_bits = size_bits >> 1;
  3508. datasize = 8 << (2 + size_bits);
  3509. offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
  3510. offset = offset << (2 + size_bits);
  3511. regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
  3512. &address);
  3513. if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
  3514. {
  3515. if (imm7_off & 0x40)
  3516. address = address - offset;
  3517. else
  3518. address = address + offset;
  3519. }
  3520. record_buf_mem[0] = datasize / 8;
  3521. record_buf_mem[1] = address;
  3522. record_buf_mem[2] = datasize / 8;
  3523. record_buf_mem[3] = address + (datasize / 8);
  3524. aarch64_insn_r->mem_rec_count = 2;
  3525. }
  3526. if (bit (aarch64_insn_r->aarch64_insn, 23))
  3527. record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
  3528. }
  3529. /* Load/store register (unsigned immediate) instructions. */
  3530. else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
  3531. {
  3532. opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
  3533. if (!(opc >> 1))
  3534. {
  3535. if (opc & 0x01)
  3536. ld_flag = 0x01;
  3537. else
  3538. ld_flag = 0x0;
  3539. }
  3540. else
  3541. {
  3542. if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
  3543. {
  3544. /* PRFM (immediate) */
  3545. return AARCH64_RECORD_SUCCESS;
  3546. }
  3547. else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
  3548. {
  3549. /* LDRSW (immediate) */
  3550. ld_flag = 0x1;
  3551. }
  3552. else
  3553. {
  3554. if (opc & 0x01)
  3555. ld_flag = 0x01;
  3556. else
  3557. ld_flag = 0x0;
  3558. }
  3559. }
  3560. if (record_debug)
  3561. {
  3562. debug_printf ("Process record: load/store (unsigned immediate):"
  3563. " size %x V %d opc %x\n", size_bits, vector_flag,
  3564. opc);
  3565. }
  3566. if (!ld_flag)
  3567. {
  3568. offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
  3569. datasize = 8 << size_bits;
  3570. regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
  3571. &address);
  3572. offset = offset << size_bits;
  3573. address = address + offset;
  3574. record_buf_mem[0] = datasize >> 3;
  3575. record_buf_mem[1] = address;
  3576. aarch64_insn_r->mem_rec_count = 1;
  3577. }
  3578. else
  3579. {
  3580. if (vector_flag)
  3581. record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
  3582. else
  3583. record_buf[0] = reg_rt;
  3584. aarch64_insn_r->reg_rec_count = 1;
  3585. }
  3586. }
  3587. /* Load/store register (register offset) instructions. */
  3588. else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
  3589. && insn_bits10_11 == 0x02 && insn_bit21)
  3590. {
  3591. if (record_debug)
  3592. debug_printf ("Process record: load/store (register offset)\n");
  3593. opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
  3594. if (!(opc >> 1))
  3595. if (opc & 0x01)
  3596. ld_flag = 0x01;
  3597. else
  3598. ld_flag = 0x0;
  3599. else
  3600. if (size_bits != 0x03)
  3601. ld_flag = 0x01;
  3602. else
  3603. return AARCH64_RECORD_UNKNOWN;
  3604. if (!ld_flag)
  3605. {
  3606. ULONGEST reg_rm_val;
  3607. regcache_raw_read_unsigned (aarch64_insn_r->regcache,
  3608. bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
  3609. if (bit (aarch64_insn_r->aarch64_insn, 12))
  3610. offset = reg_rm_val << size_bits;
  3611. else
  3612. offset = reg_rm_val;
  3613. datasize = 8 << size_bits;
  3614. regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
  3615. &address);
  3616. address = address + offset;
  3617. record_buf_mem[0] = datasize >> 3;
  3618. record_buf_mem[1] = address;
  3619. aarch64_insn_r->mem_rec_count = 1;
  3620. }
  3621. else
  3622. {
  3623. if (vector_flag)
  3624. record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
  3625. else
  3626. record_buf[0] = reg_rt;
  3627. aarch64_insn_r->reg_rec_count = 1;
  3628. }
  3629. }
  3630. /* Load/store register (immediate and unprivileged) instructions. */
  3631. else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
  3632. && !insn_bit21)
  3633. {
  3634. if (record_debug)
  3635. {
  3636. debug_printf ("Process record: load/store "
  3637. "(immediate and unprivileged)\n");
  3638. }
  3639. opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
  3640. if (!(opc >> 1))
  3641. if (opc & 0x01)
  3642. ld_flag = 0x01;
  3643. else
  3644. ld_flag = 0x0;
  3645. else
  3646. if (size_bits != 0x03)
  3647. ld_flag = 0x01;
  3648. else
  3649. return AARCH64_RECORD_UNKNOWN;
  3650. if (!ld_flag)
  3651. {
  3652. uint16_t imm9_off;
  3653. imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
  3654. offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
  3655. datasize = 8 << size_bits;
  3656. regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
  3657. &address);
  3658. if (insn_bits10_11 != 0x01)
  3659. {
  3660. if (imm9_off & 0x0100)
  3661. address = address - offset;
  3662. else
  3663. address = address + offset;
  3664. }
  3665. record_buf_mem[0] = datasize >> 3;
  3666. record_buf_mem[1] = address;
  3667. aarch64_insn_r->mem_rec_count = 1;
  3668. }
  3669. else
  3670. {
  3671. if (vector_flag)
  3672. record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
  3673. else
  3674. record_buf[0] = reg_rt;
  3675. aarch64_insn_r->reg_rec_count = 1;
  3676. }
  3677. if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
  3678. record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
  3679. }
  3680. /* Advanced SIMD load/store instructions. */
  3681. else
  3682. return aarch64_record_asimd_load_store (aarch64_insn_r);
  3683. MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
  3684. record_buf_mem);
  3685. REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
  3686. record_buf);
  3687. return AARCH64_RECORD_SUCCESS;
  3688. }
  3689. /* Record handler for data processing SIMD and floating point instructions. */
  3690. static unsigned int
  3691. aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
  3692. {
  3693. uint8_t insn_bit21, opcode, rmode, reg_rd;
  3694. uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  3695. uint8_t insn_bits11_14;
  3696. uint32_t record_buf[2];
  3697. insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  3698. insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  3699. insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  3700. insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  3701. insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  3702. opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  3703. rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  3704. reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  3705. insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  3706. if (record_debug)
  3707. debug_printf ("Process record: data processing SIMD/FP: ");
  3708. if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
  3709. {
  3710. /* Floating point - fixed point conversion instructions. */
  3711. if (!insn_bit21)
  3712. {
  3713. if (record_debug)
  3714. debug_printf ("FP - fixed point conversion");
  3715. if ((opcode >> 1) == 0x0 && rmode == 0x03)
  3716. record_buf[0] = reg_rd;
  3717. else
  3718. record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
  3719. }
  3720. /* Floating point - conditional compare instructions. */
  3721. else if (insn_bits10_11 == 0x01)
  3722. {
  3723. if (record_debug)
  3724. debug_printf ("FP - conditional compare");
  3725. record_buf[0] = AARCH64_CPSR_REGNUM;
  3726. }
  3727. /* Floating point - data processing (2-source) and
  3728. conditional select instructions. */
  3729. else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
  3730. {
  3731. if (record_debug)
  3732. debug_printf ("FP - DP (2-source)");
  3733. record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
  3734. }
  3735. else if (insn_bits10_11 == 0x00)
  3736. {
  3737. /* Floating point - immediate instructions. */
  3738. if ((insn_bits12_15 & 0x01) == 0x01
  3739. || (insn_bits12_15 & 0x07) == 0x04)
  3740. {
  3741. if (record_debug)
  3742. debug_printf ("FP - immediate");
  3743. record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
  3744. }
  3745. /* Floating point - compare instructions. */
  3746. else if ((insn_bits12_15 & 0x03) == 0x02)
  3747. {
  3748. if (record_debug)
  3749. debug_printf ("FP - immediate");
  3750. record_buf[0] = AARCH64_CPSR_REGNUM;
  3751. }
  3752. /* Floating point - integer conversions instructions. */
  3753. else if (insn_bits12_15 == 0x00)
  3754. {
  3755. /* Convert float to integer instruction. */
  3756. if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
  3757. {
  3758. if (record_debug)
  3759. debug_printf ("float to int conversion");
  3760. record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
  3761. }
  3762. /* Convert integer to float instruction. */
  3763. else if ((opcode >> 1) == 0x01 && !rmode)
  3764. {
  3765. if (record_debug)
  3766. debug_printf ("int to float conversion");
  3767. record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
  3768. }
  3769. /* Move float to integer instruction. */
  3770. else if ((opcode >> 1) == 0x03)
  3771. {
  3772. if (record_debug)
  3773. debug_printf ("move float to int");
  3774. if (!(opcode & 0x01))
  3775. record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
  3776. else
  3777. record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
  3778. }
  3779. else
  3780. return AARCH64_RECORD_UNKNOWN;
  3781. }
  3782. else
  3783. return AARCH64_RECORD_UNKNOWN;
  3784. }
  3785. else
  3786. return AARCH64_RECORD_UNKNOWN;
  3787. }
  3788. else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
  3789. {
  3790. if (record_debug)
  3791. debug_printf ("SIMD copy");
  3792. /* Advanced SIMD copy instructions. */
  3793. if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
  3794. && !bit (aarch64_insn_r->aarch64_insn, 15)
  3795. && bit (aarch64_insn_r->aarch64_insn, 10))
  3796. {
  3797. if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
  3798. record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
  3799. else
  3800. record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
  3801. }
  3802. else
  3803. record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
  3804. }
  3805. /* All remaining floating point or advanced SIMD instructions. */
  3806. else
  3807. {
  3808. if (record_debug)
  3809. debug_printf ("all remain");
  3810. record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
  3811. }
  3812. if (record_debug)
  3813. debug_printf ("\n");
  3814. /* Record the V/X register. */
  3815. aarch64_insn_r->reg_rec_count++;
  3816. /* Some of these instructions may set bits in the FPSR, so record it
  3817. too. */
  3818. record_buf[1] = AARCH64_FPSR_REGNUM;
  3819. aarch64_insn_r->reg_rec_count++;
  3820. gdb_assert (aarch64_insn_r->reg_rec_count == 2);
  3821. REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
  3822. record_buf);
  3823. return AARCH64_RECORD_SUCCESS;
  3824. }
  3825. /* Decodes insns type and invokes its record handler. */
  3826. static unsigned int
  3827. aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
  3828. {
  3829. uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
  3830. ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  3831. ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  3832. ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  3833. ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
  3834. /* Data processing - immediate instructions. */
  3835. if (!ins_bit26 && !ins_bit27 && ins_bit28)
  3836. return aarch64_record_data_proc_imm (aarch64_insn_r);
  3837. /* Branch, exception generation and system instructions. */
  3838. if (ins_bit26 && !ins_bit27 && ins_bit28)
  3839. return aarch64_record_branch_except_sys (aarch64_insn_r);
  3840. /* Load and store instructions. */
  3841. if (!ins_bit25 && ins_bit27)
  3842. return aarch64_record_load_store (aarch64_insn_r);
  3843. /* Data processing - register instructions. */
  3844. if (ins_bit25 && !ins_bit26 && ins_bit27)
  3845. return aarch64_record_data_proc_reg (aarch64_insn_r);
  3846. /* Data processing - SIMD and floating point instructions. */
  3847. if (ins_bit25 && ins_bit26 && ins_bit27)
  3848. return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
  3849. return AARCH64_RECORD_UNSUPPORTED;
  3850. }
  3851. /* Cleans up local record registers and memory allocations. */
  3852. static void
  3853. deallocate_reg_mem (insn_decode_record *record)
  3854. {
  3855. xfree (record->aarch64_regs);
  3856. xfree (record->aarch64_mems);
  3857. }
  3858. #if GDB_SELF_TEST
  3859. namespace selftests {
  3860. static void
  3861. aarch64_process_record_test (void)
  3862. {
  3863. struct gdbarch_info info;
  3864. uint32_t ret;
  3865. info.bfd_arch_info = bfd_scan_arch ("aarch64");
  3866. struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  3867. SELF_CHECK (gdbarch != NULL);
  3868. insn_decode_record aarch64_record;
  3869. memset (&aarch64_record, 0, sizeof (insn_decode_record));
  3870. aarch64_record.regcache = NULL;
  3871. aarch64_record.this_addr = 0;
  3872. aarch64_record.gdbarch = gdbarch;
  3873. /* 20 00 80 f9 prfm pldl1keep, [x1] */
  3874. aarch64_record.aarch64_insn = 0xf9800020;
  3875. ret = aarch64_record_decode_insn_handler (&aarch64_record);
  3876. SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  3877. SELF_CHECK (aarch64_record.reg_rec_count == 0);
  3878. SELF_CHECK (aarch64_record.mem_rec_count == 0);
  3879. deallocate_reg_mem (&aarch64_record);
  3880. }
  3881. } // namespace selftests
  3882. #endif /* GDB_SELF_TEST */
  3883. /* Parse the current instruction and record the values of the registers and
  3884. memory that will be changed in current instruction to record_arch_list
  3885. return -1 if something is wrong. */
  3886. int
  3887. aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
  3888. CORE_ADDR insn_addr)
  3889. {
  3890. uint32_t rec_no = 0;
  3891. uint8_t insn_size = 4;
  3892. uint32_t ret = 0;
  3893. gdb_byte buf[insn_size];
  3894. insn_decode_record aarch64_record;
  3895. memset (&buf[0], 0, insn_size);
  3896. memset (&aarch64_record, 0, sizeof (insn_decode_record));
  3897. target_read_memory (insn_addr, &buf[0], insn_size);
  3898. aarch64_record.aarch64_insn
  3899. = (uint32_t) extract_unsigned_integer (&buf[0],
  3900. insn_size,
  3901. gdbarch_byte_order (gdbarch));
  3902. aarch64_record.regcache = regcache;
  3903. aarch64_record.this_addr = insn_addr;
  3904. aarch64_record.gdbarch = gdbarch;
  3905. ret = aarch64_record_decode_insn_handler (&aarch64_record);
  3906. if (ret == AARCH64_RECORD_UNSUPPORTED)
  3907. {
  3908. gdb_printf (gdb_stderr,
  3909. _("Process record does not support instruction "
  3910. "0x%0x at address %s.\n"),
  3911. aarch64_record.aarch64_insn,
  3912. paddress (gdbarch, insn_addr));
  3913. ret = -1;
  3914. }
  3915. if (0 == ret)
  3916. {
  3917. /* Record registers. */
  3918. record_full_arch_list_add_reg (aarch64_record.regcache,
  3919. AARCH64_PC_REGNUM);
  3920. /* Always record register CPSR. */
  3921. record_full_arch_list_add_reg (aarch64_record.regcache,
  3922. AARCH64_CPSR_REGNUM);
  3923. if (aarch64_record.aarch64_regs)
  3924. for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
  3925. if (record_full_arch_list_add_reg (aarch64_record.regcache,
  3926. aarch64_record.aarch64_regs[rec_no]))
  3927. ret = -1;
  3928. /* Record memories. */
  3929. if (aarch64_record.aarch64_mems)
  3930. for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
  3931. if (record_full_arch_list_add_mem
  3932. ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
  3933. aarch64_record.aarch64_mems[rec_no].len))
  3934. ret = -1;
  3935. if (record_full_arch_list_add_end ())
  3936. ret = -1;
  3937. }
  3938. deallocate_reg_mem (&aarch64_record);
  3939. return ret;
  3940. }