43091431014311143121431314314143151431614317143181431914320143211432214323143241432514326143271432814329143301433114332143331433414335143361433714338143391434014341143421434314344143451434614347143481434914350143511435214353143541435514356143571435814359143601436114362143631436414365143661436714368143691437014371143721437314374143751437614377143781437914380143811438214383 |
- /* simulator.c -- Interface for the AArch64 simulator.
- Copyright (C) 2015-2022 Free Software Foundation, Inc.
- Contributed by Red Hat.
- This file is part of GDB.
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
- /* This must come before any other includes. */
- #include "defs.h"
- #include <stdlib.h>
- #include <stdio.h>
- #include <string.h>
- #include <sys/types.h>
- #include <math.h>
- #include <time.h>
- #include <limits.h>
- #include "simulator.h"
- #include "cpustate.h"
- #include "memory.h"
- #include "sim-signal.h"
- #define NO_SP 0
- #define SP_OK 1
- #define TST(_flag) (aarch64_test_CPSR_bit (cpu, _flag))
- #define IS_SET(_X) (TST (( _X )) ? 1 : 0)
- #define IS_CLEAR(_X) (TST (( _X )) ? 0 : 1)
- /* Space saver macro. */
- #define INSTR(HIGH, LOW) uimm (aarch64_get_instr (cpu), (HIGH), (LOW))
- #define HALT_UNALLOC \
- do \
- { \
- TRACE_DISASM (cpu, aarch64_get_PC (cpu)); \
- TRACE_INSN (cpu, \
- "Unallocated instruction detected at sim line %d," \
- " exe addr %" PRIx64, \
- __LINE__, aarch64_get_PC (cpu)); \
- sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),\
- sim_stopped, SIM_SIGILL); \
- } \
- while (0)
- #define HALT_NYI \
- do \
- { \
- TRACE_DISASM (cpu, aarch64_get_PC (cpu)); \
- TRACE_INSN (cpu, \
- "Unimplemented instruction detected at sim line %d," \
- " exe addr %" PRIx64, \
- __LINE__, aarch64_get_PC (cpu)); \
- if (! TRACE_ANY_P (cpu)) \
- sim_io_eprintf (CPU_STATE (cpu), "SIM Error: Unimplemented instruction: %#08x\n", \
- aarch64_get_instr (cpu)); \
- sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),\
- sim_stopped, SIM_SIGABRT); \
- } \
- while (0)
- #define NYI_assert(HI, LO, EXPECTED) \
- do \
- { \
- if (INSTR ((HI), (LO)) != (EXPECTED)) \
- HALT_NYI; \
- } \
- while (0)
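- /* Illustration (not part of the simulator, compiled out): decode
- routines use NYI_assert to pin down the bits they have already
- matched before acting on the remaining fields.  The field values
- here are made up.  */
- #if 0
- static void
- example_decode_group (sim_cpu *cpu)
- {
- /* Halts with HALT_NYI unless instr[29,24] really is 0x2C.  */
- NYI_assert (29, 24, 0x2C);
- }
- #endif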
- /* Helper functions used by expandLogicalImmediate. */
- /* For i = 1 .. N set result<i-1> = 1; all other bits are zero.  */
- static inline uint64_t
- ones (int N)
- {
- return (N == 64 ? (uint64_t) -1 : ((1ULL << N) - 1));
- }
- /* Pick bit N of val: result<0> = val<N>, all other bits are zero.  */
- static inline uint64_t
- pickbit (uint64_t val, int N)
- {
- return pickbits64 (val, N, N);
- }
- static uint64_t
- expand_logical_immediate (uint32_t S, uint32_t R, uint32_t N)
- {
- uint64_t mask;
- uint64_t imm;
- unsigned simd_size;
- /* The immediate value is S+1 bits set to 1, left rotated by SIMDsize - R
- (in other words, right rotated by R), then replicated. */
- if (N != 0)
- {
- simd_size = 64;
- mask = 0xffffffffffffffffull;
- }
- else
- {
- switch (S)
- {
- case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
- case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
- case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
- case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
- case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
- default: return 0;
- }
- mask = (1ull << simd_size) - 1;
- /* Top bits are IGNORED. */
- R &= simd_size - 1;
- }
- /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
- if (S == simd_size - 1)
- return 0;
- /* S+1 consecutive bits to 1. */
- /* NOTE: S can't be 63 due to detection above. */
- imm = (1ull << (S + 1)) - 1;
- /* Rotate to the left by simd_size - R. */
- if (R != 0)
- imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
- /* Replicate the value according to SIMD size. */
- switch (simd_size)
- {
- case 2: imm = (imm << 2) | imm; /* Fall through.  */
- case 4: imm = (imm << 4) | imm; /* Fall through.  */
- case 8: imm = (imm << 8) | imm; /* Fall through.  */
- case 16: imm = (imm << 16) | imm; /* Fall through.  */
- case 32: imm = (imm << 32) | imm; /* Fall through.  */
- case 64: break;
- default: return 0;
- }
- return imm;
- }
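- /* Worked example (illustrative only, not part of the simulator,
- compiled out): expanding the bitmask-immediate encoding N=0,
- immr=1, imms=0x3c with the routine above.  */
- #if 0
- static void
- example_expand_logical_immediate (void)
- {
- /* imms = 0x3c matches 11110x: simd_size = 2, S = 0, R = immr & 1 = 1.
- S+1 = 1 bit set gives 0b01; the rotate gives 0b10 per 2-bit element;
- replication across 64 bits yields 0xaaaaaaaaaaaaaaaa.  */
- uint64_t imm = expand_logical_immediate (0x3c, 1, 0);
- (void) imm;
- }
- #endif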
- /* Instr[22,10] encodes N, immr and imms.  We want a lookup table
- for each possible combination, i.e. 13 bits worth of entries.  */
- #define LI_TABLE_SIZE (1 << 13)
- static uint64_t LITable[LI_TABLE_SIZE];
- void
- aarch64_init_LIT_table (void)
- {
- unsigned index;
- for (index = 0; index < LI_TABLE_SIZE; index++)
- {
- uint32_t N = uimm (index, 12, 12);
- uint32_t immr = uimm (index, 11, 6);
- uint32_t imms = uimm (index, 5, 0);
- LITable [index] = expand_logical_immediate (imms, immr, N);
- }
- }
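- /* Illustration (not part of the simulator, compiled out): how a
- decode routine uses the table.  The 13-bit index is N:immr:imms
- taken from instr[22,10]; a zero entry marks an encoding that
- expand_logical_immediate rejected.  */
- #if 0
- static uint64_t
- example_logical_immediate_lookup (sim_cpu *cpu)
- {
- uint32_t index = INSTR (22, 10); /* N:immr:imms.  */
- uint64_t imm = LITable [index];
- /* Callers must treat imm == 0 as an unallocated encoding.  */
- return imm;
- }
- #endif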
- static void
- dexNotify (sim_cpu *cpu)
- {
- /* instr[14,0] == type : 0 ==> method entry, 1 ==> method reentry
- 2 ==> exit Java, 3 ==> start next bytecode. */
- uint32_t type = INSTR (14, 0);
- TRACE_EVENTS (cpu, "Notify Insn encountered, type = 0x%x", type);
- switch (type)
- {
- case 0:
- /* aarch64_notifyMethodEntry (aarch64_get_reg_u64 (cpu, R23, 0),
- aarch64_get_reg_u64 (cpu, R22, 0)); */
- break;
- case 1:
- /* aarch64_notifyMethodReentry (aarch64_get_reg_u64 (cpu, R23, 0),
- aarch64_get_reg_u64 (cpu, R22, 0)); */
- break;
- case 2:
- /* aarch64_notifyMethodExit (); */
- break;
- case 3:
- /* aarch64_notifyBCStart (aarch64_get_reg_u64 (cpu, R23, 0),
- aarch64_get_reg_u64 (cpu, R22, 0)); */
- break;
- }
- }
- /* secondary decode within top level groups */
- static void
- dexPseudo (sim_cpu *cpu)
- {
- /* assert instr[28,27] = 00
- We provide 3 pseudo instructions:
- HALT stops execution of the simulator causing an immediate
- return to the x86 code which entered it.
- CALLOUT initiates recursive entry into x86 code.  A register
- argument holds the address of the x86 routine.  Immediate
- values in the instruction identify the number of general
- purpose and floating point register arguments to be passed
- and the type of any value to be returned.
- NOTIFY is dispatched to dexNotify to report method entry,
- reentry, exit and bytecode-start events.  */
- uint32_t PSEUDO_HALT = 0xE0000000U;
- uint32_t PSEUDO_CALLOUT = 0x00018000U;
- uint32_t PSEUDO_CALLOUTR = 0x00018001U;
- uint32_t PSEUDO_NOTIFY = 0x00014000U;
- uint32_t dispatch;
- if (aarch64_get_instr (cpu) == PSEUDO_HALT)
- {
- TRACE_EVENTS (cpu, " Pseudo Halt Instruction");
- sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
- sim_stopped, SIM_SIGTRAP);
- }
- dispatch = INSTR (31, 15);
- /* We do not handle callouts at the moment. */
- if (dispatch == PSEUDO_CALLOUT || dispatch == PSEUDO_CALLOUTR)
- {
- TRACE_EVENTS (cpu, " Callout");
- sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
- sim_stopped, SIM_SIGABRT);
- }
- else if (dispatch == PSEUDO_NOTIFY)
- dexNotify (cpu);
- else
- HALT_UNALLOC;
- }
- /* Load-store single register (unscaled offset)
- These instructions employ a base register plus an unscaled signed
- 9 bit offset.
- N.B. the base register (source) can be Xn or SP.  All other
- registers may not be SP. */
- /* 32 bit load 32 bit unscaled signed 9 bit. */
- static void
- ldur32 (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + offset));
- }
- /* 64 bit load 64 bit unscaled signed 9 bit. */
- static void
- ldur64 (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + offset));
- }
- /* 32 bit load zero-extended byte unscaled signed 9 bit. */
- static void
- ldurb32 (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u8
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + offset));
- }
- /* 32 bit load sign-extended byte unscaled signed 9 bit. */
- static void
- ldursb32 (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rt, NO_SP, (uint32_t) aarch64_get_mem_s8
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + offset));
- }
- /* 64 bit load sign-extended byte unscaled signed 9 bit. */
- static void
- ldursb64 (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s8
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + offset));
- }
- /* 32 bit load zero-extended short unscaled signed 9 bit */
- static void
- ldurh32 (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, aarch64_get_mem_u16
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + offset));
- }
- /* 32 bit load sign-extended short unscaled signed 9 bit */
- static void
- ldursh32 (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) aarch64_get_mem_s16
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + offset));
- }
- /* 64 bit load sign-extended short unscaled signed 9 bit */
- static void
- ldursh64 (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s16
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + offset));
- }
- /* 64 bit load sign-extended word unscaled signed 9 bit */
- static void
- ldursw (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) aarch64_get_mem_s32
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + offset));
- }
- /* N.B. with stores the value in source is written to the address
- identified by source2 modified by offset. */
- /* 32 bit store 32 bit unscaled signed 9 bit. */
- static void
- stur32 (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u32 (cpu,
- aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
- aarch64_get_reg_u32 (cpu, rd, NO_SP));
- }
- /* 64 bit store 64 bit unscaled signed 9 bit */
- static void
- stur64 (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u64 (cpu,
- aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
- aarch64_get_reg_u64 (cpu, rd, NO_SP));
- }
- /* 32 bit store byte unscaled signed 9 bit */
- static void
- sturb (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u8 (cpu,
- aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
- aarch64_get_reg_u8 (cpu, rd, NO_SP));
- }
- /* 32 bit store short unscaled signed 9 bit */
- static void
- sturh (sim_cpu *cpu, int32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u16 (cpu,
- aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
- aarch64_get_reg_u16 (cpu, rd, NO_SP));
- }
- /* Load single register pc-relative label
- Offset is a signed 19 bit immediate count in words.
- rt may not be SP.  */
- /* 32 bit pc-relative load */
- static void
- ldr32_pcrel (sim_cpu *cpu, int32_t offset)
- {
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_mem_u32
- (cpu, aarch64_get_PC (cpu) + offset * 4));
- }
- /* 64 bit pc-relative load */
- static void
- ldr_pcrel (sim_cpu *cpu, int32_t offset)
- {
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_mem_u64
- (cpu, aarch64_get_PC (cpu) + offset * 4));
- }
- /* sign extended 32 bit pc-relative load */
- static void
- ldrsw_pcrel (sim_cpu *cpu, int32_t offset)
- {
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_mem_s32
- (cpu, aarch64_get_PC (cpu) + offset * 4));
- }
- /* float pc-relative load */
- static void
- fldrs_pcrel (sim_cpu *cpu, int32_t offset)
- {
- unsigned int rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u32 (cpu, rd, 0,
- aarch64_get_mem_u32
- (cpu, aarch64_get_PC (cpu) + offset * 4));
- }
- /* double pc-relative load */
- static void
- fldrd_pcrel (sim_cpu *cpu, int32_t offset)
- {
- unsigned int st = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u64 (cpu, st, 0,
- aarch64_get_mem_u64
- (cpu, aarch64_get_PC (cpu) + offset * 4));
- }
- /* long double pc-relative load. */
- static void
- fldrq_pcrel (sim_cpu *cpu, int32_t offset)
- {
- unsigned int st = INSTR (4, 0);
- uint64_t addr = aarch64_get_PC (cpu) + offset * 4;
- FRegister a;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_get_mem_long_double (cpu, addr, & a);
- aarch64_set_FP_long_double (cpu, st, a);
- }
- /* This can be used to scale an offset by applying
- the requisite shift.  The second argument is either
- 16, 32, 64 or 128.  */
- #define SCALE(_offset, _elementSize) \
- ((_offset) << ScaleShift ## _elementSize)
- /* This can be used to optionally scale a register derived offset
- by applying the requisite shift as indicated by the Scaling
- argument.  The second argument is the element size in bits:
- 16, 32, 64 or 128.  The third argument is either Scaled or
- Unscaled.  N.B. when _Scaling is Scaled the element's scale
- shift is applied; when it is Unscaled the shift count is zero.  */
- #define OPT_SCALE(_offset, _elementType, _Scaling) \
- ((_offset) << (_Scaling ? ScaleShift ## _elementType : 0))
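- /* Illustration (not part of the simulator, compiled out): a raw
- register offset of 3 used for a 32-bit access becomes
- 3 << ScaleShift32 when Scaled (a byte displacement of 12, assuming
- ScaleShift32 is 2), and stays 3 when Unscaled.  */
- #if 0
- static uint64_t
- example_opt_scale (uint64_t offset, Scaling scaling)
- {
- return OPT_SCALE (offset, 32, scaling);
- }
- #endif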
- /* This can be used to zero or sign extend a 32 bit register derived
- value to a 64 bit value.  The first argument must be the value as
- a uint32_t and the second must be either UXTW or SXTW.  The result
- is returned as an int64_t.  */
- static inline int64_t
- extend (uint32_t value, Extension extension)
- {
- union
- {
- uint32_t u;
- int32_t n;
- } x;
- /* A branchless variant of this ought to be possible. */
- if (extension == UXTW || extension == NoExtension)
- return value;
- x.u = value;
- return x.n;
- }
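- /* Illustration (not part of the simulator, compiled out): the same
- 32-bit pattern extends differently under UXTW and SXTW.  */
- #if 0
- static void
- example_extend (void)
- {
- int64_t z = extend (0xffffffff, UXTW); /* 0x00000000ffffffff.  */
- int64_t s = extend (0xffffffff, SXTW); /* 0xffffffffffffffff, i.e. -1.  */
- (void) z; (void) s;
- }
- #endif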
- /* Scalar Floating Point
- FP load/store single register (4 addressing modes)
- N.B. the base register (source) can be the stack pointer.
- The secondary source register (source2) can only be an Xn register. */
- /* Load 32 bit unscaled signed 9 bit with pre- or post-writeback. */
- static void
- fldrs_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned st = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u32 (cpu, st, 0, aarch64_get_mem_u32 (cpu, address));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
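- /* Illustration (not part of the simulator, compiled out): how the
- WriteBack argument changes the access.  With a base register
- holding 0x1000 and an offset of -16, Pre accesses 0x0ff0 and
- writes 0x0ff0 back to the base, Post accesses 0x1000 and writes
- 0x0ff0 back, and NoWriteBack accesses 0x0ff0 leaving the base
- untouched.  The register numbers still come from the current
- instruction; Pre is assumed to be the third WriteBack value
- alongside Post and NoWriteBack used above.  */
- #if 0
- static void
- example_fldrs_writeback (sim_cpu *cpu)
- {
- fldrs_wb (cpu, -16, Pre); /* LDR Sn, [Xm, #-16]!  */
- fldrs_wb (cpu, -16, Post); /* LDR Sn, [Xm], #-16  */
- }
- #endif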
- /* Load 8 bit with unsigned 12 bit offset. */
- static void
- fldrb_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rd = INSTR (4, 0);
- unsigned rn = INSTR (9, 5);
- uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u8 (cpu, rd, 0, aarch64_get_mem_u8 (cpu, addr));
- }
- /* Load 16 bit scaled unsigned 12 bit. */
- static void
- fldrh_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rd = INSTR (4, 0);
- unsigned rn = INSTR (9, 5);
- uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 16);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u16 (cpu, rd, 0, aarch64_get_mem_u16 (cpu, addr));
- }
- /* Load 32 bit scaled unsigned 12 bit. */
- static void
- fldrs_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rd = INSTR (4, 0);
- unsigned rn = INSTR (9, 5);
- uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 32);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u32 (cpu, rd, 0, aarch64_get_mem_u32 (cpu, addr));
- }
- /* Load 64 bit scaled unsigned 12 bit. */
- static void
- fldrd_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rd = INSTR (4, 0);
- unsigned rn = INSTR (9, 5);
- uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 64);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u64 (cpu, rd, 0, aarch64_get_mem_u64 (cpu, addr));
- }
- /* Load 128 bit scaled unsigned 12 bit. */
- static void
- fldrq_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rd = INSTR (4, 0);
- unsigned rn = INSTR (9, 5);
- uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 128);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u64 (cpu, rd, 0, aarch64_get_mem_u64 (cpu, addr));
- aarch64_set_vec_u64 (cpu, rd, 1, aarch64_get_mem_u64 (cpu, addr + 8));
- }
- /* Load 32 bit scaled or unscaled zero- or sign-extended
- 32-bit register offset. */
- static void
- fldrs_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned st = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
- uint64_t displacement = OPT_SCALE (extended, 32, scaling);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u32 (cpu, st, 0, aarch64_get_mem_u32
- (cpu, address + displacement));
- }
- /* Load 64 bit unscaled signed 9 bit with pre- or post-writeback. */
- static void
- fldrd_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned st = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u64 (cpu, st, 0, aarch64_get_mem_u64 (cpu, address));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* Load 64 bit scaled or unscaled zero- or sign-extended 32-bit register offset. */
- static void
- fldrd_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
- uint64_t displacement = OPT_SCALE (extended, 64, scaling);
- fldrd_wb (cpu, displacement, NoWriteBack);
- }
- /* Load 128 bit unscaled signed 9 bit with pre- or post-writeback. */
- static void
- fldrq_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- FRegister a;
- unsigned rn = INSTR (9, 5);
- unsigned st = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_get_mem_long_double (cpu, address, & a);
- aarch64_set_FP_long_double (cpu, st, a);
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* Load 128 bit scaled or unscaled zero- or sign-extended 32-bit register offset */
- static void
- fldrq_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
- uint64_t displacement = OPT_SCALE (extended, 128, scaling);
- fldrq_wb (cpu, displacement, NoWriteBack);
- }
- /* Memory Access
- load-store single register
- There are four addressing modes available here which all employ a
- 64 bit source (base) register.
- N.B. the base register (source) can be the stack pointer.
- The secondary source register (source2) can only be an Xn register.
- Scaled, 12-bit, unsigned immediate offset, without pre- and
- post-index options.
- Unscaled, 9-bit, signed immediate offset with pre- or post-index
- writeback.
- Scaled or unscaled 64-bit register offset.
- Scaled or unscaled 32-bit extended register offset.
- All offsets are assumed to be raw from the decode, i.e. the
- simulator is expected to adjust scaled offsets based on the
- accessed data size.  With register or extended register offset
- versions the same applies, except that in the latter case the
- operation may also require a sign extend.
- A separate method is provided for each possible addressing mode.  */
- /* 32 bit load 32 bit scaled unsigned 12 bit */
- static void
- ldr32_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* The target register may not be SP but the source may be. */
- aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + SCALE (offset, 32)));
- }
- /* 32 bit load 32 bit unscaled signed 9 bit with pre- or post-writeback. */
- static void
- ldr32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address;
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32 (cpu, address));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 32 bit load 32 bit scaled or unscaled
- zero- or sign-extended 32-bit register offset */
- static void
- ldr32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- /* rn may reference SP, rm and rt must reference ZR */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
- uint64_t displacement = OPT_SCALE (extended, 32, scaling);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rt, NO_SP,
- aarch64_get_mem_u32 (cpu, address + displacement));
- }
- /* 64 bit load 64 bit scaled unsigned 12 bit */
- static void
- ldr_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* The target register may not be SP but the source may be. */
- aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + SCALE (offset, 64)));
- }
- /* 64 bit load 64 bit unscaled signed 9 bit with pre- or post-writeback. */
- static void
- ldr_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address;
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64 (cpu, address));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 64 bit load 64 bit scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- ldr_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- /* rn may reference SP, rm and rt must reference ZR */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
- uint64_t displacement = OPT_SCALE (extended, 64, scaling);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rt, NO_SP,
- aarch64_get_mem_u64 (cpu, address + displacement));
- }
- /* 32 bit load zero-extended byte scaled unsigned 12 bit. */
- static void
- ldrb32_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* The target register may not be SP but the source may be.
- There is no scaling required for a byte load.  */
- aarch64_set_reg_u64 (cpu, rt, NO_SP,
- aarch64_get_mem_u8
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset));
- }
- /* 32 bit load zero-extended byte unscaled signed 9 bit with pre- or post-writeback. */
- static void
- ldrb32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address;
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u8 (cpu, address));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 32 bit load zero-extended byte scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- ldrb32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- /* rn may reference SP, rm and rt must reference ZR */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t displacement = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- extension);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* There is no scaling required for a byte load. */
- aarch64_set_reg_u64 (cpu, rt, NO_SP,
- aarch64_get_mem_u8 (cpu, address + displacement));
- }
- /* 64 bit load sign-extended byte unscaled signed 9 bit
- with pre- or post-writeback. */
- static void
- ldrsb_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address;
- int64_t val;
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- val = aarch64_get_mem_s8 (cpu, address);
- aarch64_set_reg_s64 (cpu, rt, NO_SP, val);
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 64 bit load sign-extended byte scaled unsigned 12 bit. */
- static void
- ldrsb_abs (sim_cpu *cpu, uint32_t offset)
- {
- ldrsb_wb (cpu, offset, NoWriteBack);
- }
- /* 64 bit load sign-extended byte scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- ldrsb_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- /* rn may reference SP, rm and rt must reference ZR */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t displacement = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- extension);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* There is no scaling required for a byte load. */
- aarch64_set_reg_s64 (cpu, rt, NO_SP,
- aarch64_get_mem_s8 (cpu, address + displacement));
- }
- /* 32 bit load zero-extended short scaled unsigned 12 bit. */
- static void
- ldrh32_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint32_t val;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* The target register may not be SP but the source may be. */
- val = aarch64_get_mem_u16 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + SCALE (offset, 16));
- aarch64_set_reg_u32 (cpu, rt, NO_SP, val);
- }
- /* 32 bit load zero-extended short unscaled signed 9 bit
- with pre- or post-writeback. */
- static void
- ldrh32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address;
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u32 (cpu, rt, NO_SP, aarch64_get_mem_u16 (cpu, address));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 32 bit load zero-extended short scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- ldrh32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- /* rn may reference SP, rm and rt must reference ZR */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
- uint64_t displacement = OPT_SCALE (extended, 16, scaling);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u32 (cpu, rt, NO_SP,
- aarch64_get_mem_u16 (cpu, address + displacement));
- }
- /* 32 bit load sign-extended short scaled unsigned 12 bit. */
- static void
- ldrsh32_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- int32_t val;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* The target register may not be SP but the source may be. */
- val = aarch64_get_mem_s16 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + SCALE (offset, 16));
- aarch64_set_reg_s32 (cpu, rt, NO_SP, val);
- }
- /* 32 bit load sign-extended short unscaled signed 9 bit
- with pre- or post-writeback. */
- static void
- ldrsh32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address;
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_s32 (cpu, rt, NO_SP,
- (int32_t) aarch64_get_mem_s16 (cpu, address));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 32 bit load sign-extended short scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- ldrsh32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- /* rn may reference SP, rm and rt must reference ZR */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
- uint64_t displacement = OPT_SCALE (extended, 16, scaling);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_s32 (cpu, rt, NO_SP,
- (int32_t) aarch64_get_mem_s16
- (cpu, address + displacement));
- }
- /* 64 bit load sign-extended short scaled unsigned 12 bit. */
- static void
- ldrsh_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- int64_t val;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* The target register may not be SP but the source may be. */
- val = aarch64_get_mem_s16 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + SCALE (offset, 16));
- aarch64_set_reg_s64 (cpu, rt, NO_SP, val);
- }
- /* 64 bit load sign-extended short unscaled signed 9 bit
- with pre- or post-writeback. */
- static void
- ldrsh64_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address;
- int64_t val;
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- val = aarch64_get_mem_s16 (cpu, address);
- aarch64_set_reg_s64 (cpu, rt, NO_SP, val);
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 64 bit load sign-extended short scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- ldrsh_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- /* rn may reference SP, rm and rt must reference ZR */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
- uint64_t displacement = OPT_SCALE (extended, 16, scaling);
- int64_t val;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- val = aarch64_get_mem_s16 (cpu, address + displacement);
- aarch64_set_reg_s64 (cpu, rt, NO_SP, val);
- }
- /* 64 bit load sign-extended 32 bit scaled unsigned 12 bit. */
- static void
- ldrsw_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- int64_t val;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- val = aarch64_get_mem_s32 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + SCALE (offset, 32));
- /* The target register may not be SP but the source may be. */
- aarch64_set_reg_s64 (cpu, rt, NO_SP, val);
- }
- /* 64 bit load sign-extended 32 bit unscaled signed 9 bit
- with pre- or post-writeback. */
- static void
- ldrsw_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address;
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s32 (cpu, address));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 64 bit load sign-extended 32 bit scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- ldrsw_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- /* rn may reference SP, rm and rt must reference ZR */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
- uint64_t displacement = OPT_SCALE (extended, 32, scaling);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_s64 (cpu, rt, NO_SP,
- aarch64_get_mem_s32 (cpu, address + displacement));
- }
- /* N.B. with stores the value in source is written to the
- address identified by source2 modified by source3/offset. */
- /* 32 bit store scaled unsigned 12 bit. */
- static void
- str32_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* The target register may not be SP but the source may be. */
- aarch64_set_mem_u32 (cpu, (aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + SCALE (offset, 32)),
- aarch64_get_reg_u32 (cpu, rt, NO_SP));
- }
- /* 32 bit store unscaled signed 9 bit with pre- or post-writeback. */
- static void
- str32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address;
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u32 (cpu, address, aarch64_get_reg_u32 (cpu, rt, NO_SP));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 32 bit store scaled or unscaled zero- or
- sign-extended 32-bit register offset. */
- static void
- str32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
- uint64_t displacement = OPT_SCALE (extended, 32, scaling);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u32 (cpu, address + displacement,
- aarch64_get_reg_u32 (cpu, rt, NO_SP));
- }
- /* 64 bit store scaled unsigned 12 bit. */
- static void
- str_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u64 (cpu,
- aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + SCALE (offset, 64),
- aarch64_get_reg_u64 (cpu, rt, NO_SP));
- }
- /* 64 bit store unscaled signed 9 bit with pre- or post-writeback. */
- static void
- str_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address;
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u64 (cpu, address, aarch64_get_reg_u64 (cpu, rt, NO_SP));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 64 bit store scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- str_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- /* rn may reference SP, rm and rt must reference ZR */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- extension);
- uint64_t displacement = OPT_SCALE (extended, 64, scaling);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u64 (cpu, address + displacement,
- aarch64_get_reg_u64 (cpu, rt, NO_SP));
- }
- /* 32 bit store byte scaled unsigned 12 bit. */
- static void
- strb_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* The target register may not be SP but the source may be.
- There is no scaling required for a byte store.  */
- aarch64_set_mem_u8 (cpu,
- aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
- aarch64_get_reg_u8 (cpu, rt, NO_SP));
- }
- /* 32 bit store byte unscaled signed 9 bit with pre- or post-writeback. */
- static void
- strb_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address;
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u8 (cpu, address, aarch64_get_reg_u8 (cpu, rt, NO_SP));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 32 bit store byte scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- strb_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- /* rn may reference SP, rm and rt must reference ZR */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t displacement = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- extension);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* There is no scaling required for a byte store.  */
- aarch64_set_mem_u8 (cpu, address + displacement,
- aarch64_get_reg_u8 (cpu, rt, NO_SP));
- }
- /* 32 bit store short scaled unsigned 12 bit. */
- static void
- strh_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* The target register may not be SP but the source may be. */
- aarch64_set_mem_u16 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + SCALE (offset, 16),
- aarch64_get_reg_u16 (cpu, rt, NO_SP));
- }
- /* 32 bit store short unscaled signed 9 bit with pre- or post-writeback. */
- static void
- strh_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address;
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u16 (cpu, address, aarch64_get_reg_u16 (cpu, rt, NO_SP));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 32 bit store short scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- strh_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- /* rn may reference SP, rm and rt must reference ZR */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
- uint64_t displacement = OPT_SCALE (extended, 16, scaling);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u16 (cpu, address + displacement,
- aarch64_get_reg_u16 (cpu, rt, NO_SP));
- }
- /* Prefetch unsigned 12 bit. */
- static void
- prfm_abs (sim_cpu *cpu, uint32_t offset)
- {
- /* instr[4,0] = prfop : 00000 ==> PLDL1KEEP, 00001 ==> PLDL1STRM,
- 00010 ==> PLDL2KEEP, 00011 ==> PLDL2STRM,
- 00100 ==> PLDL3KEEP, 00101 ==> PLDL3STRM,
- 10000 ==> PSTL1KEEP, 10001 ==> PSTL1STRM,
- 10010 ==> PSTL2KEEP, 10011 ==> PSTL2STRM,
- 10100 ==> PSTL3KEEP, 10101 ==> PSTL3STRM,
- ow ==> UNALLOC
- PrfOp prfop = prfop (instr, 4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + SCALE (offset, 64). */
- /* TODO : implement prefetch of address. */
- }
- /* Prefetch scaled or unscaled zero- or sign-extended 32-bit register offset. */
- static void
- prfm_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- /* instr[4,0] = prfop : 00000 ==> PLDL1KEEP, 00001 ==> PLDL1STRM,
- 00010 ==> PLDL2KEEP, 00011 ==> PLDL2STRM,
- 00100 ==> PLDL3KEEP, 00101 ==> PLDL3STRM,
- 10000 ==> PSTL1KEEP, 10001 ==> PSTL1STRM,
- 10010 ==> PSTL2KEEP, 10011 ==> PSTL2STRM,
- 10100 ==> PSTL3KEEP, 10101 ==> PSTL3STRM,
- ow ==> UNALLOC
- rn may reference SP, rm may only reference ZR
- PrfOp prfop = prfop (instr, 4, 0);
- uint64_t base = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- extension);
- uint64_t displacement = OPT_SCALE (extended, 64, scaling);
- uint64_t address = base + displacement. */
- /* TODO : implement prefetch of address */
- }
- /* 64 bit pc-relative prefetch. */
- static void
- prfm_pcrel (sim_cpu *cpu, int32_t offset)
- {
- /* instr[4,0] = prfop : 00000 ==> PLDL1KEEP, 00001 ==> PLDL1STRM,
- 00010 ==> PLDL2KEEP, 00011 ==> PLDL2STRM,
- 00100 ==> PLDL3KEEP, 00101 ==> PLDL3STRM,
- 10000 ==> PSTL1KEEP, 10001 ==> PSTL1STRM,
- 10010 ==> PSTL2KEEP, 10011 ==> PSTL2STRM,
- 10100 ==> PSTL3KEEP, 10101 ==> PSTL3STRM,
- ow ==> UNALLOC
- PrfOp prfop = prfop (instr, 4, 0);
- uint64_t address = aarch64_get_PC (cpu) + offset * 4. */
- /* TODO : implement this */
- }
- /* Load-store exclusive. */
- static void
- ldxr (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int size = INSTR (31, 30);
- /* int ordered = INSTR (15, 15); */
- /* int exclusive = ! INSTR (23, 23); */
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (size)
- {
- case 0:
- aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u8 (cpu, address));
- break;
- case 1:
- aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u16 (cpu, address));
- break;
- case 2:
- aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32 (cpu, address));
- break;
- case 3:
- aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64 (cpu, address));
- break;
- }
- }
- static void
- stxr (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rt = INSTR (4, 0);
- unsigned rs = INSTR (20, 16);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int size = INSTR (31, 30);
- uint64_t data = aarch64_get_reg_u64 (cpu, rt, NO_SP);
- switch (size)
- {
- case 0: aarch64_set_mem_u8 (cpu, address, data); break;
- case 1: aarch64_set_mem_u16 (cpu, address, data); break;
- case 2: aarch64_set_mem_u32 (cpu, address, data); break;
- case 3: aarch64_set_mem_u64 (cpu, address, data); break;
- }
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rs, NO_SP, 0); /* Exclusivity is not modelled: the store always succeeds.  */
- }
- static void
- dexLoadLiteral (sim_cpu *cpu)
- {
- /* instr[29,27] == 011
- instr[25,24] == 00
- instr[31,30]:instr[26] == opc : 000 ==> LDRW, 001 ==> FLDRS
- 010 ==> LDRX, 011 ==> FLDRD
- 100 ==> LDRSW, 101 ==> FLDRQ
- 110 ==> PRFM, 111 ==> UNALLOC
- instr[26] ==> V : 0 ==> GReg, 1 ==> FReg
- instr[23, 5] == simm19 */
- /* unsigned rt = INSTR (4, 0); */
- uint32_t dispatch = (INSTR (31, 30) << 1) | INSTR (26, 26);
- int32_t imm = simm32 (aarch64_get_instr (cpu), 23, 5);
- switch (dispatch)
- {
- case 0: ldr32_pcrel (cpu, imm); break;
- case 1: fldrs_pcrel (cpu, imm); break;
- case 2: ldr_pcrel (cpu, imm); break;
- case 3: fldrd_pcrel (cpu, imm); break;
- case 4: ldrsw_pcrel (cpu, imm); break;
- case 5: fldrq_pcrel (cpu, imm); break;
- case 6: prfm_pcrel (cpu, imm); break;
- case 7:
- default:
- HALT_UNALLOC;
- }
- }
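- /* Worked example (illustrative only, not part of the simulator,
- compiled out): for LDR Xt, <label> the opc bits are
- instr[31,30] = 01 and instr[26] = 0, giving dispatch
- (1 << 1) | 0 = 2, which routes to ldr_pcrel with the
- sign-extended simm19 word offset.  */
- #if 0
- static uint32_t
- example_literal_dispatch (uint32_t instr)
- {
- return (uimm (instr, 31, 30) << 1) | uimm (instr, 26, 26);
- }
- #endif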
- /* Immediate arithmetic
- The aimm argument is a 12 bit unsigned value or a 12 bit unsigned
- value left shifted by 12 bits (done at decode).
- N.B. the register args (dest, source) can normally be Xn or SP.
- The exception occurs for flag setting instructions which may
- only use Xn for the output (dest). */
- /* 32 bit add immediate. */
- static void
- add32 (sim_cpu *cpu, uint32_t aimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u32 (cpu, rn, SP_OK) + aimm);
- }
- /* 64 bit add immediate. */
- static void
- add64 (sim_cpu *cpu, uint32_t aimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u64 (cpu, rn, SP_OK) + aimm);
- }
- static void
- set_flags_for_add32 (sim_cpu *cpu, int32_t value1, int32_t value2)
- {
- int32_t result = value1 + value2;
- int64_t sresult = (int64_t) value1 + (int64_t) value2;
- uint64_t uresult = (uint64_t)(uint32_t) value1
- + (uint64_t)(uint32_t) value2;
- uint32_t flags = 0;
- if (result == 0)
- flags |= Z;
- if (result & (1u << 31))
- flags |= N;
- if (uresult != (uint32_t)uresult)
- flags |= C;
- if (sresult != (int32_t)sresult)
- flags |= V;
- aarch64_set_CPSR (cpu, flags);
- }
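- /* Worked example (illustrative only, not part of the simulator,
- compiled out): 0x7fffffff + 1 = 0x80000000, so N is set (bit 31),
- Z and C are clear (the unsigned sum fits in 32 bits) and V is set
- (the signed sum does not fit in int32_t).  */
- #if 0
- static void
- example_add32_flags (sim_cpu *cpu)
- {
- set_flags_for_add32 (cpu, 0x7fffffff, 1); /* Leaves N and V set.  */
- }
- #endif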
- #define NEG(a) (((a) & signbit) == signbit)
- #define POS(a) (((a) & signbit) == 0)
- static void
- set_flags_for_add64 (sim_cpu *cpu, uint64_t value1, uint64_t value2)
- {
- uint64_t result = value1 + value2;
- uint32_t flags = 0;
- uint64_t signbit = 1ULL << 63;
- if (result == 0)
- flags |= Z;
- if (NEG (result))
- flags |= N;
- if ( (NEG (value1) && NEG (value2))
- || (NEG (value1) && POS (result))
- || (NEG (value2) && POS (result)))
- flags |= C;
- if ( (NEG (value1) && NEG (value2) && POS (result))
- || (POS (value1) && POS (value2) && NEG (result)))
- flags |= V;
- aarch64_set_CPSR (cpu, flags);
- }
- static void
- set_flags_for_sub32 (sim_cpu *cpu, uint32_t value1, uint32_t value2)
- {
- uint32_t result = value1 - value2;
- uint32_t flags = 0;
- uint32_t signbit = 1U << 31;
- if (result == 0)
- flags |= Z;
- if (NEG (result))
- flags |= N;
- if ( (NEG (value1) && POS (value2))
- || (NEG (value1) && POS (result))
- || (POS (value2) && POS (result)))
- flags |= C;
- if ( (NEG (value1) && POS (value2) && POS (result))
- || (POS (value1) && NEG (value2) && NEG (result)))
- flags |= V;
- aarch64_set_CPSR (cpu, flags);
- }
- static void
- set_flags_for_sub64 (sim_cpu *cpu, uint64_t value1, uint64_t value2)
- {
- uint64_t result = value1 - value2;
- uint32_t flags = 0;
- uint64_t signbit = 1ULL << 63;
- if (result == 0)
- flags |= Z;
- if (NEG (result))
- flags |= N;
- if ( (NEG (value1) && POS (value2))
- || (NEG (value1) && POS (result))
- || (POS (value2) && POS (result)))
- flags |= C;
- if ( (NEG (value1) && POS (value2) && POS (result))
- || (POS (value1) && NEG (value2) && NEG (result)))
- flags |= V;
- aarch64_set_CPSR (cpu, flags);
- }
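- /* Worked example (illustrative only, not part of the simulator,
- compiled out): on subtraction the C flag means "no borrow".
- 5 - 3 sets C; 3 - 5 clears C and sets N.  */
- #if 0
- static void
- example_sub64_flags (sim_cpu *cpu)
- {
- set_flags_for_sub64 (cpu, 5, 3); /* C set: no borrow.  */
- set_flags_for_sub64 (cpu, 3, 5); /* N set, C clear.  */
- }
- #endif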
- static void
- set_flags_for_binop32 (sim_cpu *cpu, uint32_t result)
- {
- uint32_t flags = 0;
- /* Flags start clear, so only Z and N need setting.  */
- if (result == 0)
- flags |= Z;
- if (result & (1u << 31))
- flags |= N;
- aarch64_set_CPSR (cpu, flags);
- }
- static void
- set_flags_for_binop64 (sim_cpu *cpu, uint64_t result)
- {
- uint32_t flags = 0;
- /* Flags start clear, so only Z and N need setting.  */
- if (result == 0)
- flags |= Z;
- if (result & (1ULL << 63))
- flags |= N;
- aarch64_set_CPSR (cpu, flags);
- }
- /* 32 bit add immediate set flags. */
- static void
- adds32 (sim_cpu *cpu, uint32_t aimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- /* TODO : do we need to worry about signs here? */
- int32_t value1 = aarch64_get_reg_s32 (cpu, rn, SP_OK);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + aimm);
- set_flags_for_add32 (cpu, value1, aimm);
- }
- /* 64 bit add immediate set flags. */
- static void
- adds64 (sim_cpu *cpu, uint32_t aimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- uint64_t value2 = aimm;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2);
- set_flags_for_add64 (cpu, value1, value2);
- }
- /* 32 bit sub immediate. */
- static void
- sub32 (sim_cpu *cpu, uint32_t aimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u32 (cpu, rn, SP_OK) - aimm);
- }
- /* 64 bit sub immediate. */
- static void
- sub64 (sim_cpu *cpu, uint32_t aimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u64 (cpu, rn, SP_OK) - aimm);
- }
- /* 32 bit sub immediate set flags. */
- static void
- subs32 (sim_cpu *cpu, uint32_t aimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, SP_OK);
- uint32_t value2 = aimm;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2);
- set_flags_for_sub32 (cpu, value1, value2);
- }
- /* 64 bit sub immediate set flags. */
- static void
- subs64 (sim_cpu *cpu, uint32_t aimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- uint64_t value2 = aimm;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2);
- set_flags_for_sub64 (cpu, value1, value2);
- }
- /* Data Processing Register. */
- /* First two helpers to perform the shift operations. */
- static inline uint32_t
- shifted32 (uint32_t value, Shift shift, uint32_t count)
- {
- switch (shift)
- {
- default:
- case LSL:
- return (value << count);
- case LSR:
- return (value >> count);
- case ASR:
- {
- int32_t svalue = value;
- return (svalue >> count);
- }
- case ROR:
- {
- uint32_t top;
- uint32_t bottom;
- /* A rotate by zero is a no-op; it must not shift by 32 below.  */
- if (count == 0)
- return value;
- top = value >> count;
- bottom = value << (32 - count);
- return (bottom | top);
- }
- }
- }
- static inline uint64_t
- shifted64 (uint64_t value, Shift shift, uint32_t count)
- {
- switch (shift)
- {
- default:
- case LSL:
- return (value << count);
- case LSR:
- return (value >> count);
- case ASR:
- {
- int64_t svalue = value;
- return (svalue >> count);
- }
- case ROR:
- {
- uint64_t top;
- uint64_t bottom;
- /* A rotate by zero is a no-op; it must not shift by 64 below.  */
- if (count == 0)
- return value;
- top = value >> count;
- bottom = value << (64 - count);
- return (bottom | top);
- }
- }
- }
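- /* Worked example (illustrative only, not part of the simulator,
- compiled out): a 32-bit ROR by 1 moves the low bit to the top:
- shifted32 (1, ROR, 1) == 0x80000000.  */
- #if 0
- static void
- example_shifted32 (void)
- {
- uint32_t r = shifted32 (1, ROR, 1); /* 0x80000000.  */
- (void) r;
- }
- #endif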
- /* Arithmetic shifted register.
- These allow an optional LSL, ASR or LSR to the second source
- register with a count up to the register bit count.
- N.B register args may not be SP. */
- /* 32 bit ADD shifted register. */
- static void
- add32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_reg_u32 (cpu, rn, NO_SP)
- + shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- shift, count));
- }
- /* 64 bit ADD shifted register. */
- static void
- add64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_reg_u64 (cpu, rn, NO_SP)
- + shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
- shift, count));
- }
- /* 32 bit ADD shifted register setting flags. */
- static void
- adds32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- uint32_t value2 = shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- shift, count);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2);
- set_flags_for_add32 (cpu, value1, value2);
- }
- /* 64 bit ADD shifted register setting flags. */
- static void
- adds64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- uint64_t value2 = shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
- shift, count);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2);
- set_flags_for_add64 (cpu, value1, value2);
- }
- /* 32 bit SUB shifted register. */
- static void
- sub32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_reg_u32 (cpu, rn, NO_SP)
- - shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- shift, count));
- }
- /* 64 bit SUB shifted register. */
- static void
- sub64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_reg_u64 (cpu, rn, NO_SP)
- - shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
- shift, count));
- }
- /* 32 bit SUB shifted register setting flags. */
- static void
- subs32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- uint32_t value2 = shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- shift, count);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2);
- set_flags_for_sub32 (cpu, value1, value2);
- }
- /* 64 bit SUB shifted register setting flags. */
- static void
- subs64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- uint64_t value2 = shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
- shift, count);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2);
- set_flags_for_sub64 (cpu, value1, value2);
- }
- /* First a couple more helpers to fetch the
- relevant source register element either
- sign or zero extended as required by the
- extension value. */
- static uint32_t
- extreg32 (sim_cpu *cpu, unsigned int lo, Extension extension)
- {
- switch (extension)
- {
- case UXTB: return aarch64_get_reg_u8 (cpu, lo, NO_SP);
- case UXTH: return aarch64_get_reg_u16 (cpu, lo, NO_SP);
- case UXTW: /* Fall through. */
- case UXTX: return aarch64_get_reg_u32 (cpu, lo, NO_SP);
- case SXTB: return aarch64_get_reg_s8 (cpu, lo, NO_SP);
- case SXTH: return aarch64_get_reg_s16 (cpu, lo, NO_SP);
- case SXTW: /* Fall through. */
- case SXTX: /* Fall through. */
- default: return aarch64_get_reg_s32 (cpu, lo, NO_SP);
- }
- }
- static uint64_t
- extreg64 (sim_cpu *cpu, unsigned int lo, Extension extension)
- {
- switch (extension)
- {
- case UXTB: return aarch64_get_reg_u8 (cpu, lo, NO_SP);
- case UXTH: return aarch64_get_reg_u16 (cpu, lo, NO_SP);
- case UXTW: return aarch64_get_reg_u32 (cpu, lo, NO_SP);
- case UXTX: return aarch64_get_reg_u64 (cpu, lo, NO_SP);
- case SXTB: return aarch64_get_reg_s8 (cpu, lo, NO_SP);
- case SXTH: return aarch64_get_reg_s16 (cpu, lo, NO_SP);
- case SXTW: return aarch64_get_reg_s32 (cpu, lo, NO_SP);
- case SXTX:
- default: return aarch64_get_reg_s64 (cpu, lo, NO_SP);
- }
- }
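- /* A worked example (values chosen for illustration): if the low byte
- of register 1 holds 0x80 then
- extreg32 (cpu, 1, UXTB) == 0x00000080
- extreg32 (cpu, 1, SXTB) == 0xFFFFFF80
- i.e. the extension only determines how the bits above the selected
- source width are filled before any subsequent shift. */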
- /* Arithmetic extending register
- These allow an optional sign or zero extension of some portion of
- the second source register followed by an optional left shift of
- between 0 and 4 bits.
- N.B output (dest) and first input arg (source) may normally be Xn
- or SP. However, for flag setting operations dest can only be
- Xn. Second input registers are always Xn. */
- /* 32 bit ADD extending register. */
- static void
- add32_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u32 (cpu, rn, SP_OK)
- + (extreg32 (cpu, rm, extension) << shift));
- }
- /* 64 bit ADD extending register.
- N.B. This subsumes the case with 64 bit source2 and UXTX #n or LSL #0. */
- static void
- add64_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u64 (cpu, rn, SP_OK)
- + (extreg64 (cpu, rm, extension) << shift));
- }
- /* 32 bit ADD extending register setting flags. */
- static void
- adds32_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, SP_OK);
- uint32_t value2 = extreg32 (cpu, rm, extension) << shift;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2);
- set_flags_for_add32 (cpu, value1, value2);
- }
- /* 64 bit ADD extending register setting flags */
- /* N.B. this subsumes the case with 64 bit source2 and UXTX #n or LSL #0 */
- static void
- adds64_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- uint64_t value2 = extreg64 (cpu, rm, extension) << shift;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2);
- set_flags_for_add64 (cpu, value1, value2);
- }
- /* 32 bit SUB extending register. */
- static void
- sub32_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u32 (cpu, rn, SP_OK)
- - (extreg32 (cpu, rm, extension) << shift));
- }
- /* 64 bit SUB extending register. */
- /* N.B. this subsumes the case with 64 bit source2 and UXTX #n or LSL #0. */
- static void
- sub64_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u64 (cpu, rn, SP_OK)
- - (extreg64 (cpu, rm, extension) << shift));
- }
- /* 32 bit SUB extending register setting flags. */
- static void
- subs32_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, SP_OK);
- uint32_t value2 = extreg32 (cpu, rm, extension) << shift;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2);
- set_flags_for_sub32 (cpu, value1, value2);
- }
- /* 64 bit SUB extending register setting flags */
- /* N.B. this subsumes the case with 64 bit source2 and UXTX #n or LSL #0 */
- static void
- subs64_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- uint64_t value2 = extreg64 (cpu, rm, extension) << shift;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 - value2);
- set_flags_for_sub64 (cpu, value1, value2);
- }
- static void
- dexAddSubtractImmediate (sim_cpu *cpu)
- {
- /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30] = op : 0 ==> ADD, 1 ==> SUB
- instr[29] = set : 0 ==> no flags, 1 ==> set flags
- instr[28,24] = 10001
- instr[23,22] = shift : 00 ==> LSL#0, 01 ==> LSL#12, 1x ==> UNALLOC
- instr[21,10] = uimm12
- instr[9,5] = Rn
- instr[4,0] = Rd */
- /* N.B. the shift is applied at decode before calling the add/sub routine. */
- uint32_t shift = INSTR (23, 22);
- uint32_t imm = INSTR (21, 10);
- uint32_t dispatch = INSTR (31, 29);
- NYI_assert (28, 24, 0x11);
- if (shift > 1)
- HALT_UNALLOC;
- if (shift)
- imm <<= 12;
- switch (dispatch)
- {
- case 0: add32 (cpu, imm); break;
- case 1: adds32 (cpu, imm); break;
- case 2: sub32 (cpu, imm); break;
- case 3: subs32 (cpu, imm); break;
- case 4: add64 (cpu, imm); break;
- case 5: adds64 (cpu, imm); break;
- case 6: sub64 (cpu, imm); break;
- case 7: subs64 (cpu, imm); break;
- }
- }
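- /* A hand-decoded example, for illustration: the word 0x91400C41 has
- instr[31,29] == 100 (dispatch 4), shift == 01 and uimm12 == 3, so it
- is executed as ADD X1, X2, #3, LSL #12, i.e. add64 (cpu, 3 << 12). */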
- static void
- dexAddSubtractShiftedRegister (sim_cpu *cpu)
- {
- /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30,29] = op : 00 ==> ADD, 01 ==> ADDS, 10 ==> SUB, 11 ==> SUBS
- instr[28,24] = 01011
- instr[23,22] = shift : 0 ==> LSL, 1 ==> LSR, 2 ==> ASR, 3 ==> UNALLOC
- instr[21] = 0
- instr[20,16] = Rm
- instr[15,10] = count : must be 0xxxxx for 32 bit
- instr[9,5] = Rn
- instr[4,0] = Rd */
- uint32_t size = INSTR (31, 31);
- uint32_t count = INSTR (15, 10);
- Shift shiftType = INSTR (23, 22);
- NYI_assert (28, 24, 0x0B);
- NYI_assert (21, 21, 0);
- /* Shift encoded as ROR is unallocated. */
- if (shiftType == ROR)
- HALT_UNALLOC;
- /* 32 bit operations must have count[5] = 0
- or else we have an UNALLOC. */
- if (size == 0 && uimm (count, 5, 5))
- HALT_UNALLOC;
- /* Dispatch on size:op i.e instr [31,29]. */
- switch (INSTR (31, 29))
- {
- case 0: add32_shift (cpu, shiftType, count); break;
- case 1: adds32_shift (cpu, shiftType, count); break;
- case 2: sub32_shift (cpu, shiftType, count); break;
- case 3: subs32_shift (cpu, shiftType, count); break;
- case 4: add64_shift (cpu, shiftType, count); break;
- case 5: adds64_shift (cpu, shiftType, count); break;
- case 6: sub64_shift (cpu, shiftType, count); break;
- case 7: subs64_shift (cpu, shiftType, count); break;
- }
- }
- static void
- dexAddSubtractExtendedRegister (sim_cpu *cpu)
- {
- /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30] = op : 0 ==> ADD, 1 ==> SUB
- instr[29] = set? : 0 ==> no flags, 1 ==> set flags
- instr[28,24] = 01011
- instr[23,22] = opt : 0 ==> ok, 1,2,3 ==> UNALLOC
- instr[21] = 1
- instr[20,16] = Rm
- instr[15,13] = option : 000 ==> UXTB, 001 ==> UXTH,
- 010 ==> UXTW|LSL, 011 ==> UXTX,
- 100 ==> SXTB, 101 ==> SXTH,
- 110 ==> SXTW, 111 ==> SXTX,
- instr[12,10] = shift : 0,1,2,3,4 ==> ok, 5,6,7 ==> UNALLOC
- instr[9,5] = Rn
- instr[4,0] = Rd */
- Extension extensionType = INSTR (15, 13);
- uint32_t shift = INSTR (12, 10);
- NYI_assert (28, 24, 0x0B);
- NYI_assert (21, 21, 1);
- /* Shift may not exceed 4. */
- if (shift > 4)
- HALT_UNALLOC;
- /* Dispatch on size:op:set?. */
- switch (INSTR (31, 29))
- {
- case 0: add32_ext (cpu, extensionType, shift); break;
- case 1: adds32_ext (cpu, extensionType, shift); break;
- case 2: sub32_ext (cpu, extensionType, shift); break;
- case 3: subs32_ext (cpu, extensionType, shift); break;
- case 4: add64_ext (cpu, extensionType, shift); break;
- case 5: adds64_ext (cpu, extensionType, shift); break;
- case 6: sub64_ext (cpu, extensionType, shift); break;
- case 7: subs64_ext (cpu, extensionType, shift); break;
- }
- }
- /* Conditional data processing
- Condition register is implicit 3rd source. */
- /* 32 bit add with carry. */
- /* N.B register args may not be SP. */
- static void
- adc32 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_reg_u32 (cpu, rn, NO_SP)
- + aarch64_get_reg_u32 (cpu, rm, NO_SP)
- + IS_SET (C));
- }
- /* 64 bit add with carry */
- static void
- adc64 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_reg_u64 (cpu, rn, NO_SP)
- + aarch64_get_reg_u64 (cpu, rm, NO_SP)
- + IS_SET (C));
- }
- /* 32 bit add with carry setting flags. */
- static void
- adcs32 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- uint32_t value2 = aarch64_get_reg_u32 (cpu, rm, NO_SP);
- uint32_t carry = IS_SET (C);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2 + carry);
- set_flags_for_add32 (cpu, value1, value2 + carry);
- }
- /* 64 bit add with carry setting flags. */
- static void
- adcs64 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- uint64_t value2 = aarch64_get_reg_u64 (cpu, rm, NO_SP);
- uint64_t carry = IS_SET (C);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 + value2 + carry);
- set_flags_for_add64 (cpu, value1, value2 + carry);
- }
- /* 32 bit sub with carry. */
- static void
- sbc32 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5); /* ngc iff rn == 31. */
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_reg_u32 (cpu, rn, NO_SP)
- - aarch64_get_reg_u32 (cpu, rm, NO_SP)
- - 1 + IS_SET (C));
- }
- /* 64 bit sub with carry */
- static void
- sbc64 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_reg_u64 (cpu, rn, NO_SP)
- - aarch64_get_reg_u64 (cpu, rm, NO_SP)
- - 1 + IS_SET (C));
- }
- /* 32 bit sub with carry setting flags */
- static void
- sbcs32 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- uint32_t value2 = aarch64_get_reg_u32 (cpu, rm, NO_SP);
- uint32_t carry = IS_SET (C);
- uint32_t result = value1 - value2 - 1 + carry;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, result);
- set_flags_for_sub32 (cpu, value1, value2 + 1 - carry);
- }
- /* 64 bit sub with carry setting flags */
- static void
- sbcs64 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- uint64_t value2 = aarch64_get_reg_u64 (cpu, rm, NO_SP);
- uint64_t carry = IS_SET (C);
- uint64_t result = value1 - value2 - 1 + carry;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, result);
- set_flags_for_sub64 (cpu, value1, value2 + 1 - carry);
- }
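- /* N.B. SBC-style operations compute Rn - Rm - 1 + C (equivalently
- Rn + NOT (Rm) + C), which is why the helpers above form the result
- and the flags from the subtrahend (value2 + 1 - carry). */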
- static void
- dexAddSubtractWithCarry (sim_cpu *cpu)
- {
- /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30] = op : 0 ==> ADC, 1 ==> SBC
- instr[29] = set? : 0 ==> no flags, 1 ==> set flags
- instr[28,21] = 1 1010 000
- instr[20,16] = Rm
- instr[15,10] = op2 : 000000 ==> ok, otherwise ==> UNALLOC
- instr[9,5] = Rn
- instr[4,0] = Rd */
- uint32_t op2 = INSTR (15, 10);
- NYI_assert (28, 21, 0xD0);
- if (op2 != 0)
- HALT_UNALLOC;
- /* Dispatch on size:op:set?. */
- switch (INSTR (31, 29))
- {
- case 0: adc32 (cpu); break;
- case 1: adcs32 (cpu); break;
- case 2: sbc32 (cpu); break;
- case 3: sbcs32 (cpu); break;
- case 4: adc64 (cpu); break;
- case 5: adcs64 (cpu); break;
- case 6: sbc64 (cpu); break;
- case 7: sbcs64 (cpu); break;
- }
- }
- static uint32_t
- testConditionCode (sim_cpu *cpu, CondCode cc)
- {
- /* This should be reducible to branchless logic
- by some careful testing of bits in CC followed
- by the requisite masking and combining of bits
- from the flag register.
- For now we do it with a switch. */
- int res;
- switch (cc)
- {
- case EQ: res = IS_SET (Z); break;
- case NE: res = IS_CLEAR (Z); break;
- case CS: res = IS_SET (C); break;
- case CC: res = IS_CLEAR (C); break;
- case MI: res = IS_SET (N); break;
- case PL: res = IS_CLEAR (N); break;
- case VS: res = IS_SET (V); break;
- case VC: res = IS_CLEAR (V); break;
- case HI: res = IS_SET (C) && IS_CLEAR (Z); break;
- case LS: res = IS_CLEAR (C) || IS_SET (Z); break;
- case GE: res = IS_SET (N) == IS_SET (V); break;
- case LT: res = IS_SET (N) != IS_SET (V); break;
- case GT: res = IS_CLEAR (Z) && (IS_SET (N) == IS_SET (V)); break;
- case LE: res = IS_SET (Z) || (IS_SET (N) != IS_SET (V)); break;
- case AL:
- case NV:
- default:
- res = 1;
- break;
- }
- return res;
- }
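- /* A possible branchless scheme (an untested sketch, not what the
- code above does): the condition codes come in pairs that differ
- only in bit 0, which inverts the sense of the test, so
- res = test_for_pair (cpu, cc >> 1) ^ (cc & 1);
- would suffice, where test_for_pair is a hypothetical helper for the
- eight basic tests and NV (0xF) is special-cased to match AL. */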
- static void
- CondCompare (sim_cpu *cpu) /* aka: ccmp and ccmn */
- {
- /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30] = compare with positive (1) or negative value (0)
- instr[29,21] = 1 1101 0010
- instr[20,16] = Rm or const
- instr[15,12] = cond
- instr[11] = compare reg (0) or const (1)
- instr[10] = 0
- instr[9,5] = Rn
- instr[4] = 0
- instr[3,0] = value for CPSR bits if the comparison does not take place. */
- signed int negate;
- unsigned rm;
- unsigned rn;
- NYI_assert (29, 21, 0x1d2);
- NYI_assert (10, 10, 0);
- NYI_assert (4, 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (! testConditionCode (cpu, INSTR (15, 12)))
- {
- aarch64_set_CPSR (cpu, INSTR (3, 0));
- return;
- }
- negate = INSTR (30, 30) ? 1 : -1;
- rm = INSTR (20, 16);
- rn = INSTR ( 9, 5);
- if (INSTR (31, 31))
- {
- if (INSTR (11, 11))
- set_flags_for_sub64 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK),
- negate * (uint64_t) rm);
- else
- set_flags_for_sub64 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK),
- negate * aarch64_get_reg_u64 (cpu, rm, SP_OK));
- }
- else
- {
- if (INSTR (11, 11))
- set_flags_for_sub32 (cpu, aarch64_get_reg_u32 (cpu, rn, SP_OK),
- negate * rm);
- else
- set_flags_for_sub32 (cpu, aarch64_get_reg_u32 (cpu, rn, SP_OK),
- negate * aarch64_get_reg_u32 (cpu, rm, SP_OK));
- }
- }
- static void
- do_vec_MOV_whole_vector (sim_cpu *cpu)
- {
- /* MOV Vd.T, Vs.T (alias for ORR Vd.T, Vn.T, Vm.T where Vn == Vm)
- instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,21] = 001110101
- instr[20,16] = Vs
- instr[15,10] = 000111
- instr[9,5] = Vs
- instr[4,0] = Vd */
- unsigned vs = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- NYI_assert (29, 21, 0x075);
- NYI_assert (15, 10, 0x07);
- if (INSTR (20, 16) != vs)
- HALT_NYI;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (30, 30))
- aarch64_set_vec_u64 (cpu, vd, 1, aarch64_get_vec_u64 (cpu, vs, 1));
- aarch64_set_vec_u64 (cpu, vd, 0, aarch64_get_vec_u64 (cpu, vs, 0));
- }
- static void
- do_vec_SMOV_into_scalar (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = word(0)/long(1)
- instr[29,21] = 00 1110 000
- instr[20,16] = element size and index
- instr[15,10] = 00 0010 11
- instr[9,5] = V source
- instr[4,0] = R dest */
- unsigned vs = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned imm5 = INSTR (20, 16);
- unsigned full = INSTR (30, 30);
- int size, index;
- NYI_assert (29, 21, 0x070);
- NYI_assert (15, 10, 0x0B);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (imm5 & 0x1)
- {
- size = 0;
- index = (imm5 >> 1) & 0xF;
- }
- else if (imm5 & 0x2)
- {
- size = 1;
- index = (imm5 >> 2) & 0x7;
- }
- else if (full && (imm5 & 0x4))
- {
- size = 2;
- index = (imm5 >> 3) & 0x3;
- }
- else
- HALT_UNALLOC;
- switch (size)
- {
- case 0:
- if (full)
- aarch64_set_reg_s64 (cpu, rd, NO_SP,
- aarch64_get_vec_s8 (cpu, vs, index));
- else
- aarch64_set_reg_s32 (cpu, rd, NO_SP,
- aarch64_get_vec_s8 (cpu, vs, index));
- break;
- case 1:
- if (full)
- aarch64_set_reg_s64 (cpu, rd, NO_SP,
- aarch64_get_vec_s16 (cpu, vs, index));
- else
- aarch64_set_reg_s32 (cpu, rd, NO_SP,
- aarch64_get_vec_s16 (cpu, vs, index));
- break;
- case 2:
- aarch64_set_reg_s64 (cpu, rd, NO_SP,
- aarch64_get_vec_s32 (cpu, vs, index));
- break;
- default:
- HALT_UNALLOC;
- }
- }
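- /* The imm5 field encodes both the element size and the index: the
- lowest set bit selects the size and the bits above it hold the
- index. For example imm5 == 0b00110 has bit 1 as its lowest set
- bit, selecting 16-bit elements with index (imm5 >> 2) == 1. */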
- static void
- do_vec_UMOV_into_scalar (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = word(0)/long(1)
- instr[29,21] = 00 1110 000
- instr[20,16] = element size and index
- instr[15,10] = 00 0011 11
- instr[9,5] = V source
- instr[4,0] = R dest */
- unsigned vs = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned imm5 = INSTR (20, 16);
- unsigned full = INSTR (30, 30);
- int size, index;
- NYI_assert (29, 21, 0x070);
- NYI_assert (15, 10, 0x0F);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (!full)
- {
- if (imm5 & 0x1)
- {
- size = 0;
- index = (imm5 >> 1) & 0xF;
- }
- else if (imm5 & 0x2)
- {
- size = 1;
- index = (imm5 >> 2) & 0x7;
- }
- else if (imm5 & 0x4)
- {
- size = 2;
- index = (imm5 >> 3) & 0x3;
- }
- else
- HALT_UNALLOC;
- }
- else if (imm5 & 0x8)
- {
- size = 3;
- index = (imm5 >> 4) & 0x1;
- }
- else
- HALT_UNALLOC;
- switch (size)
- {
- case 0:
- aarch64_set_reg_u32 (cpu, rd, NO_SP,
- aarch64_get_vec_u8 (cpu, vs, index));
- break;
- case 1:
- aarch64_set_reg_u32 (cpu, rd, NO_SP,
- aarch64_get_vec_u16 (cpu, vs, index));
- break;
- case 2:
- aarch64_set_reg_u32 (cpu, rd, NO_SP,
- aarch64_get_vec_u32 (cpu, vs, index));
- break;
- case 3:
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_vec_u64 (cpu, vs, index));
- break;
- default:
- HALT_UNALLOC;
- }
- }
- static void
- do_vec_INS (sim_cpu *cpu)
- {
- /* instr[31,21] = 01001110000
- instr[20,16] = element size and index
- instr[15,10] = 000111
- instr[9,5] = W source
- instr[4,0] = V dest */
- int index;
- unsigned rs = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- NYI_assert (31, 21, 0x270);
- NYI_assert (15, 10, 0x07);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (16, 16))
- {
- index = INSTR (20, 17);
- aarch64_set_vec_u8 (cpu, vd, index,
- aarch64_get_reg_u8 (cpu, rs, NO_SP));
- }
- else if (INSTR (17, 17))
- {
- index = INSTR (20, 18);
- aarch64_set_vec_u16 (cpu, vd, index,
- aarch64_get_reg_u16 (cpu, rs, NO_SP));
- }
- else if (INSTR (18, 18))
- {
- index = INSTR (20, 19);
- aarch64_set_vec_u32 (cpu, vd, index,
- aarch64_get_reg_u32 (cpu, rs, NO_SP));
- }
- else if (INSTR (19, 19))
- {
- index = INSTR (20, 20);
- aarch64_set_vec_u64 (cpu, vd, index,
- aarch64_get_reg_u64 (cpu, rs, NO_SP));
- }
- else
- HALT_NYI;
- }
- static void
- do_vec_DUP_vector_into_vector (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,21] = 00 1110 000
- instr[20,16] = element size and index
- instr[15,10] = 0000 01
- instr[9,5] = V source
- instr[4,0] = V dest. */
- unsigned full = INSTR (30, 30);
- unsigned vs = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- int i, index;
- NYI_assert (29, 21, 0x070);
- NYI_assert (15, 10, 0x01);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (16, 16))
- {
- index = INSTR (20, 17);
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i, aarch64_get_vec_u8 (cpu, vs, index));
- }
- else if (INSTR (17, 17))
- {
- index = INSTR (20, 18);
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_u16 (cpu, vd, i, aarch64_get_vec_u16 (cpu, vs, index));
- }
- else if (INSTR (18, 18))
- {
- index = INSTR (20, 19);
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i, aarch64_get_vec_u32 (cpu, vs, index));
- }
- else
- {
- if (INSTR (19, 19) == 0)
- HALT_UNALLOC;
- if (! full)
- HALT_UNALLOC;
- index = INSTR (20, 20);
- for (i = 0; i < 2; i++)
- aarch64_set_vec_u64 (cpu, vd, i, aarch64_get_vec_u64 (cpu, vs, index));
- }
- }
- static void
- do_vec_TBL (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,21] = 00 1110 000
- instr[20,16] = Vm
- instr[15] = 0
- instr[14,13] = vec length
- instr[12,10] = 000
- instr[9,5] = V start
- instr[4,0] = V dest */
- int full = INSTR (30, 30);
- int len = INSTR (14, 13) + 1;
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- NYI_assert (29, 21, 0x070);
- NYI_assert (12, 10, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- for (i = 0; i < (full ? 16 : 8); i++)
- {
- unsigned int selector = aarch64_get_vec_u8 (cpu, vm, i);
- uint8_t val;
- if (selector < 16)
- val = aarch64_get_vec_u8 (cpu, vn, selector);
- else if (selector < 32)
- val = len < 2 ? 0 : aarch64_get_vec_u8 (cpu, vn + 1, selector - 16);
- else if (selector < 48)
- val = len < 3 ? 0 : aarch64_get_vec_u8 (cpu, vn + 2, selector - 32);
- else if (selector < 64)
- val = len < 4 ? 0 : aarch64_get_vec_u8 (cpu, vn + 3, selector - 48);
- else
- val = 0;
- aarch64_set_vec_u8 (cpu, vd, i, val);
- }
- }
- static void
- do_vec_TRN (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,24] = 00 1110
- instr[23,22] = size
- instr[21] = 0
- instr[20,16] = Vm
- instr[15] = 0
- instr[14] = TRN1 (0) / TRN2 (1)
- instr[13,10] = 1010
- instr[9,5] = V source
- instr[4,0] = V dest. */
- int full = INSTR (30, 30);
- int second = INSTR (14, 14);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- NYI_assert (29, 24, 0x0E);
- NYI_assert (13, 10, 0xA);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 8 : 4); i++)
- {
- aarch64_set_vec_u8
- (cpu, vd, i * 2,
- aarch64_get_vec_u8 (cpu, second ? vm : vn, i * 2));
- aarch64_set_vec_u8
- (cpu, vd, i * 2 + 1,
- aarch64_get_vec_u8 (cpu, second ? vn : vm, i * 2 + 1));
- }
- break;
- case 1:
- for (i = 0; i < (full ? 4 : 2); i++)
- {
- aarch64_set_vec_u16
- (cpu, vd, i * 2,
- aarch64_get_vec_u16 (cpu, second ? vm : vn, i * 2));
- aarch64_set_vec_u16
- (cpu, vd, i * 2 + 1,
- aarch64_get_vec_u16 (cpu, second ? vn : vm, i * 2 + 1));
- }
- break;
- case 2:
- aarch64_set_vec_u32
- (cpu, vd, 0, aarch64_get_vec_u32 (cpu, second ? vm : vn, 0));
- aarch64_set_vec_u32
- (cpu, vd, 1, aarch64_get_vec_u32 (cpu, second ? vn : vm, 1));
- aarch64_set_vec_u32
- (cpu, vd, 2, aarch64_get_vec_u32 (cpu, second ? vm : vn, 2));
- aarch64_set_vec_u32
- (cpu, vd, 3, aarch64_get_vec_u32 (cpu, second ? vn : vm, 3));
- break;
- case 3:
- if (! full)
- HALT_UNALLOC;
- aarch64_set_vec_u64 (cpu, vd, 0,
- aarch64_get_vec_u64 (cpu, second ? vm : vn, 0));
- aarch64_set_vec_u64 (cpu, vd, 1,
- aarch64_get_vec_u64 (cpu, second ? vn : vm, 1));
- break;
- }
- }
- static void
- do_vec_DUP_scalar_into_vector (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = 0=> zero top 64-bits, 1=> duplicate into top 64-bits
- [must be 1 for 64-bit xfer]
- instr[29,20] = 00 1110 0000
- instr[19,16] = element size: 0001=> 8-bits, 0010=> 16-bits,
- 0100=> 32-bits, 1000=> 64-bits
- instr[15,10] = 0000 11
- instr[9,5] = W source
- instr[4,0] = V dest. */
- unsigned i;
- unsigned Vd = INSTR (4, 0);
- unsigned Rs = INSTR (9, 5);
- int both = INSTR (30, 30);
- NYI_assert (29, 20, 0x0E0);
- NYI_assert (15, 10, 0x03);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (19, 16))
- {
- case 1:
- for (i = 0; i < (both ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, Vd, i, aarch64_get_reg_u8 (cpu, Rs, NO_SP));
- break;
- case 2:
- for (i = 0; i < (both ? 8 : 4); i++)
- aarch64_set_vec_u16 (cpu, Vd, i, aarch64_get_reg_u16 (cpu, Rs, NO_SP));
- break;
- case 4:
- for (i = 0; i < (both ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, Vd, i, aarch64_get_reg_u32 (cpu, Rs, NO_SP));
- break;
- case 8:
- if (!both)
- HALT_NYI;
- aarch64_set_vec_u64 (cpu, Vd, 0, aarch64_get_reg_u64 (cpu, Rs, NO_SP));
- aarch64_set_vec_u64 (cpu, Vd, 1, aarch64_get_reg_u64 (cpu, Rs, NO_SP));
- break;
- default:
- HALT_NYI;
- }
- }
- static void
- do_vec_UZP (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,24] = 00 1110
- instr[23,22] = size: byte(00), half(01), word (10), long (11)
- instr[21] = 0
- instr[20,16] = Vm
- instr[15] = 0
- instr[14] = lower (0) / upper (1)
- instr[13,10] = 0110
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- int full = INSTR (30, 30);
- int upper = INSTR (14, 14);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- uint64_t val_m1 = aarch64_get_vec_u64 (cpu, vm, 0);
- uint64_t val_m2 = aarch64_get_vec_u64 (cpu, vm, 1);
- uint64_t val_n1 = aarch64_get_vec_u64 (cpu, vn, 0);
- uint64_t val_n2 = aarch64_get_vec_u64 (cpu, vn, 1);
- uint64_t val1;
- uint64_t val2;
- uint64_t input2 = full ? val_n2 : val_m1;
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 21, 0);
- NYI_assert (15, 15, 0);
- NYI_assert (13, 10, 6);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- val1 = (val_n1 >> (upper * 8)) & 0xFFULL;
- val1 |= (val_n1 >> ((upper * 8) + 8)) & 0xFF00ULL;
- val1 |= (val_n1 >> ((upper * 8) + 16)) & 0xFF0000ULL;
- val1 |= (val_n1 >> ((upper * 8) + 24)) & 0xFF000000ULL;
- val1 |= (input2 << (32 - (upper * 8))) & 0xFF00000000ULL;
- val1 |= (input2 << (24 - (upper * 8))) & 0xFF0000000000ULL;
- val1 |= (input2 << (16 - (upper * 8))) & 0xFF000000000000ULL;
- val1 |= (input2 << (8 - (upper * 8))) & 0xFF00000000000000ULL;
- if (full)
- {
- val2 = (val_m1 >> (upper * 8)) & 0xFFULL;
- val2 |= (val_m1 >> ((upper * 8) + 8)) & 0xFF00ULL;
- val2 |= (val_m1 >> ((upper * 8) + 16)) & 0xFF0000ULL;
- val2 |= (val_m1 >> ((upper * 8) + 24)) & 0xFF000000ULL;
- val2 |= (val_m2 << (32 - (upper * 8))) & 0xFF00000000ULL;
- val2 |= (val_m2 << (24 - (upper * 8))) & 0xFF0000000000ULL;
- val2 |= (val_m2 << (16 - (upper * 8))) & 0xFF000000000000ULL;
- val2 |= (val_m2 << (8 - (upper * 8))) & 0xFF00000000000000ULL;
- }
- break;
- case 1:
- val1 = (val_n1 >> (upper * 16)) & 0xFFFFULL;
- val1 |= (val_n1 >> ((upper * 16) + 16)) & 0xFFFF0000ULL;
- val1 |= (input2 << (32 - (upper * 16))) & 0xFFFF00000000ULL;
- val1 |= (input2 << (16 - (upper * 16))) & 0xFFFF000000000000ULL;
- if (full)
- {
- val2 = (val_m1 >> (upper * 16)) & 0xFFFFULL;
- val2 |= (val_m1 >> ((upper * 16) + 16)) & 0xFFFF0000ULL;
- val2 |= (val_m2 << (32 - (upper * 16))) & 0xFFFF00000000ULL;
- val2 |= (val_m2 << (16 - (upper * 16))) & 0xFFFF000000000000ULL;
- }
- break;
- case 2:
- val1 = (val_n1 >> (upper * 32)) & 0xFFFFFFFF;
- val1 |= (input2 << (32 - (upper * 32))) & 0xFFFFFFFF00000000ULL;
- if (full)
- {
- val2 = (val_m1 >> (upper * 32)) & 0xFFFFFFFF;
- val2 |= (val_m2 << (32 - (upper * 32))) & 0xFFFFFFFF00000000ULL;
- }
- break;
- case 3:
- if (! full)
- HALT_UNALLOC;
- val1 = upper ? val_n2 : val_n1;
- val2 = upper ? val_m2 : val_m1;
- break;
- }
- aarch64_set_vec_u64 (cpu, vd, 0, val1);
- if (full)
- aarch64_set_vec_u64 (cpu, vd, 1, val2);
- }
- static void
- do_vec_ZIP (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,24] = 00 1110
- instr[23,22] = size: byte(00), half(01), word (10), long (11)
- instr[21] = 0
- instr[20,16] = Vm
- instr[15] = 0
- instr[14] = lower (0) / upper (1)
- instr[13,10] = 1110
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- int full = INSTR (30, 30);
- int upper = INSTR (14, 14);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- uint64_t val_m1 = aarch64_get_vec_u64 (cpu, vm, 0);
- uint64_t val_m2 = aarch64_get_vec_u64 (cpu, vm, 1);
- uint64_t val_n1 = aarch64_get_vec_u64 (cpu, vn, 0);
- uint64_t val_n2 = aarch64_get_vec_u64 (cpu, vn, 1);
- uint64_t val1 = 0;
- uint64_t val2 = 0;
- /* ZIP interleaves elements of Vn and Vm; for the half-width forms
- only the low 64 bits of each register are meaningful, so the
- upper-half (ZIP2) variants must draw from the top 32 bits of those
- low halves rather than from val_n2/val_m2. */
- uint64_t input1 = upper ? (full ? val_n2 : (val_n1 >> 32)) : val_n1;
- uint64_t input2 = upper ? (full ? val_m2 : (val_m1 >> 32)) : val_m1;
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 21, 0);
- NYI_assert (15, 15, 0);
- NYI_assert (13, 10, 0xE);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- val1 =
- ((input1 << 0) & (0xFFULL << 0))
- | ((input2 << 8) & (0xFFULL << 8))
- | ((input1 << 8) & (0xFFULL << 16))
- | ((input2 << 16) & (0xFFULL << 24))
- | ((input1 << 16) & (0xFFULL << 32))
- | ((input2 << 24) & (0xFFULL << 40))
- | ((input1 << 24) & (0xFFULL << 48))
- | ((input2 << 32) & (0xFFULL << 56));
- val2 =
- ((input1 >> 32) & (0xFFULL << 0))
- | ((input2 >> 24) & (0xFFULL << 8))
- | ((input1 >> 24) & (0xFFULL << 16))
- | ((input2 >> 16) & (0xFFULL << 24))
- | ((input1 >> 16) & (0xFFULL << 32))
- | ((input2 >> 8) & (0xFFULL << 40))
- | ((input1 >> 8) & (0xFFULL << 48))
- | ((input2 >> 0) & (0xFFULL << 56));
- break;
- case 1:
- val1 =
- ((input1 << 0) & (0xFFFFULL << 0))
- | ((input2 << 16) & (0xFFFFULL << 16))
- | ((input1 << 16) & (0xFFFFULL << 32))
- | ((input2 << 32) & (0xFFFFULL << 48));
- val2 =
- ((input1 >> 32) & (0xFFFFULL << 0))
- | ((input2 >> 16) & (0xFFFFULL << 16))
- | ((input1 >> 16) & (0xFFFFULL << 32))
- | ((input2 >> 0) & (0xFFFFULL << 48));
- break;
- case 2:
- val1 = (input1 & 0xFFFFFFFFULL) | (input2 << 32);
- val2 = ((input1 >> 32) & 0xFFFFFFFFULL) | (input2 & 0xFFFFFFFF00000000ULL);
- break;
- case 3:
- val1 = input1;
- val2 = input2;
- break;
- }
- aarch64_set_vec_u64 (cpu, vd, 0, val1);
- if (full)
- aarch64_set_vec_u64 (cpu, vd, 1, val2);
- }
- /* Floating point immediates are encoded in 8 bits.
- fpimm[7] = sign bit.
- fpimm[6:4] = signed exponent.
- fpimm[3:0] = fraction (assuming leading 1).
- i.e. F = s * 1.f * 2^(e - b). */
- static float
- fp_immediate_for_encoding_32 (uint32_t imm8)
- {
- float u;
- uint32_t s, e, f, i;
- s = (imm8 >> 7) & 0x1;
- e = (imm8 >> 4) & 0x7;
- f = imm8 & 0xf;
- /* The fp value is s * n/16 * 2^r where n is 16+f and r is the signed exponent. */
- u = (16.0 + f) / 16.0;
- /* N.B. exponent is signed. */
- if (e < 4)
- {
- int epos = e;
- for (i = 0; i <= epos; i++)
- u *= 2.0;
- }
- else
- {
- int eneg = 7 - e;
- for (i = 0; i < eneg; i++)
- u /= 2.0;
- }
- if (s)
- u = - u;
- return u;
- }
- static double
- fp_immediate_for_encoding_64 (uint32_t imm8)
- {
- double u;
- uint32_t s, e, f, i;
- s = (imm8 >> 7) & 0x1;
- e = (imm8 >> 4) & 0x7;
- f = imm8 & 0xf;
- /* The fp value is s * n/16 * 2^r where n is 16+f and r is the signed exponent. */
- u = (16.0 + f) / 16.0;
- /* N.B. exponent is signed. */
- if (e < 4)
- {
- int epos = e;
- for (i = 0; i <= epos; i++)
- u *= 2.0;
- }
- else
- {
- int eneg = 7 - e;
- for (i = 0; i < eneg; i++)
- u /= 2.0;
- }
- if (s)
- u = - u;
- return u;
- }
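- /* Two worked examples of the decoding above: imm8 == 0x70 gives
- s == 0, e == 7, f == 0, so u == 16/16 == 1.0; imm8 == 0x00 gives
- e == 0, doubling 16/16 once to yield 2.0. Both agree with the
- architected FMOV immediate encodings for 1.0 and 2.0. */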
- static void
- do_vec_MOV_immediate (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half selector
- instr[29,19] = 00111100000
- instr[18,16] = high 3 bits of uimm8
- instr[15,12] = size & shift:
- 0000 => 32-bit
- 0010 => 32-bit + LSL#8
- 0100 => 32-bit + LSL#16
- 0110 => 32-bit + LSL#24
- 1000 => 16-bit
- 1010 => 16-bit + LSL#8
- 1100 => 32-bit + MSL#8
- 1101 => 32-bit + MSL#16
- 1110 => 8-bit
- 1111 => double
- instr[11,10] = 01
- instr[9,5] = low 5-bits of uimm8
- instr[4,0] = Vd. */
- int full = INSTR (30, 30);
- unsigned vd = INSTR (4, 0);
- unsigned val = (INSTR (18, 16) << 5) | INSTR (9, 5);
- unsigned i;
- NYI_assert (29, 19, 0x1E0);
- NYI_assert (11, 10, 1);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (15, 12))
- {
- case 0x0: /* 32-bit, no shift. */
- case 0x2: /* 32-bit, shift by 8. */
- case 0x4: /* 32-bit, shift by 16. */
- case 0x6: /* 32-bit, shift by 24. */
- val <<= (8 * INSTR (14, 13));
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i, val);
- break;
- case 0xa: /* 16-bit, shift by 8. */
- val <<= 8;
- /* Fall through. */
- case 0x8: /* 16-bit, no shift. */
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_u16 (cpu, vd, i, val);
- break;
- case 0xd: /* 32-bit, mask shift by 16. */
- val <<= 8;
- val |= 0xFF;
- /* Fall through. */
- case 0xc: /* 32-bit, mask shift by 8. */
- val <<= 8;
- val |= 0xFF;
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i, val);
- break;
- case 0xe: /* 8-bit, no shift. */
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i, val);
- break;
- case 0xf: /* FMOV Vs.{2|4}S, #fpimm. */
- {
- float u = fp_immediate_for_encoding_32 (val);
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_float (cpu, vd, i, u);
- break;
- }
- default:
- HALT_NYI;
- }
- }
- static void
- do_vec_MVNI (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half selector
- instr[29,19] = 10111100000
- instr[18,16] = high 3 bits of uimm8
- instr[15,12] = selector
- instr[11,10] = 01
- instr[9,5] = low 5-bits of uimm8
- instr[4,0] = Vd. */
- int full = INSTR (30, 30);
- unsigned vd = INSTR (4, 0);
- unsigned val = (INSTR (18, 16) << 5) | INSTR (9, 5);
- unsigned i;
- NYI_assert (29, 19, 0x5E0);
- NYI_assert (11, 10, 1);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (15, 12))
- {
- case 0x0: /* 32-bit, no shift. */
- case 0x2: /* 32-bit, shift by 8. */
- case 0x4: /* 32-bit, shift by 16. */
- case 0x6: /* 32-bit, shift by 24. */
- val <<= (8 * INSTR (14, 13));
- val = ~ val;
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i, val);
- return;
- case 0xa: /* 16-bit, 8 bit shift. */
- val <<= 8;
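- /* Fall through. */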
- case 0x8: /* 16-bit, no shift. */
- val = ~ val;
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_u16 (cpu, vd, i, val);
- return;
- case 0xd: /* 32-bit, mask shift by 16. */
- val <<= 8;
- val |= 0xFF;
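- /* Fall through. */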
- case 0xc: /* 32-bit, mask shift by 8. */
- val <<= 8;
- val |= 0xFF;
- val = ~ val;
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i, val);
- return;
- case 0xE: /* MOVI Dn, #mask64 */
- {
- uint64_t mask = 0;
- for (i = 0; i < 8; i++)
- if (val & (1 << i))
- mask |= (0xFFULL << (i * 8));
- aarch64_set_vec_u64 (cpu, vd, 0, mask);
- aarch64_set_vec_u64 (cpu, vd, 1, mask);
- return;
- }
- case 0xf: /* FMOV Vd.2D, #fpimm. */
- {
- double u = fp_immediate_for_encoding_64 (val);
- if (! full)
- HALT_UNALLOC;
- aarch64_set_vec_double (cpu, vd, 0, u);
- aarch64_set_vec_double (cpu, vd, 1, u);
- return;
- }
- default:
- HALT_NYI;
- }
- }
- #define ABS(A) ((A) < 0 ? - (A) : (A))
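- /* N.B. as with the hardware ABS instruction, applying this macro to
- the most negative representable value wraps and yields that same
- value (the negation overflows; two's complement behaviour is
- assumed here). */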
- static void
- do_vec_ABS (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,24] = 00 1110
- instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit, 11=> 64-bit
- instr[21,10] = 10 0000 1011 10
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned full = INSTR (30, 30);
- unsigned i;
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 10, 0x82E);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_s8 (cpu, vd, i,
- ABS (aarch64_get_vec_s8 (cpu, vn, i)));
- break;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_s16 (cpu, vd, i,
- ABS (aarch64_get_vec_s16 (cpu, vn, i)));
- break;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_s32 (cpu, vd, i,
- ABS (aarch64_get_vec_s32 (cpu, vn, i)));
- break;
- case 3:
- if (! full)
- HALT_NYI;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_s64 (cpu, vd, i,
- ABS (aarch64_get_vec_s64 (cpu, vn, i)));
- break;
- }
- }
- static void
- do_vec_ADDV (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half selector
- instr[29,24] = 00 1110
- instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit, 11=> 64-bit
- instr[21,10] = 11 0001 1011 10
- instr[9,5] = Vm
- instr[4,0] = Rd. */
- unsigned vm = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 10, 0xC6E);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- {
- uint8_t val = 0;
- for (i = 0; i < (full ? 16 : 8); i++)
- val += aarch64_get_vec_u8 (cpu, vm, i);
- aarch64_set_vec_u64 (cpu, rd, 0, val);
- return;
- }
- case 1:
- {
- uint16_t val = 0;
- for (i = 0; i < (full ? 8 : 4); i++)
- val += aarch64_get_vec_u16 (cpu, vm, i);
- aarch64_set_vec_u64 (cpu, rd, 0, val);
- return;
- }
- case 2:
- {
- uint32_t val = 0;
- if (! full)
- HALT_UNALLOC;
- for (i = 0; i < 4; i++)
- val += aarch64_get_vec_u32 (cpu, vm, i);
- aarch64_set_vec_u64 (cpu, rd, 0, val);
- return;
- }
- case 3:
- HALT_UNALLOC;
- }
- }
- static void
- do_vec_ins_2 (sim_cpu *cpu)
- {
- /* instr[31,21] = 01001110000
- instr[20,18] = size & element selector
- instr[17,14] = 0000
- instr[13] = direction: to vec(0), from vec (1)
- instr[12,10] = 111
- instr[9,5] = Vm
- instr[4,0] = Vd. */
- unsigned elem;
- unsigned vm = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- NYI_assert (31, 21, 0x270);
- NYI_assert (17, 14, 0);
- NYI_assert (12, 10, 7);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (13, 13) == 1)
- {
- if (INSTR (18, 18) == 1)
- {
- /* 32-bit moves. */
- elem = INSTR (20, 19);
- aarch64_set_reg_u64 (cpu, vd, NO_SP,
- aarch64_get_vec_u32 (cpu, vm, elem));
- }
- else
- {
- /* 64-bit moves. */
- if (INSTR (19, 19) != 1)
- HALT_NYI;
- elem = INSTR (20, 20);
- aarch64_set_reg_u64 (cpu, vd, NO_SP,
- aarch64_get_vec_u64 (cpu, vm, elem));
- }
- }
- else
- {
- if (INSTR (18, 18) == 1)
- {
- /* 32-bit moves. */
- elem = INSTR (20, 19);
- aarch64_set_vec_u32 (cpu, vd, elem,
- aarch64_get_reg_u32 (cpu, vm, NO_SP));
- }
- else
- {
- /* 64-bit moves. */
- if (INSTR (19, 19) != 1)
- HALT_NYI;
- elem = INSTR (20, 20);
- aarch64_set_vec_u64 (cpu, vd, elem,
- aarch64_get_reg_u64 (cpu, vm, NO_SP));
- }
- }
- }
- #define DO_VEC_WIDENING_MUL(N, DST_TYPE, READ_TYPE, WRITE_TYPE) \
- do \
- { \
- DST_TYPE a[N], b[N]; \
- \
- for (i = 0; i < (N); i++) \
- { \
- a[i] = aarch64_get_vec_##READ_TYPE (cpu, vn, i + bias); \
- b[i] = aarch64_get_vec_##READ_TYPE (cpu, vm, i + bias); \
- } \
- for (i = 0; i < (N); i++) \
- aarch64_set_vec_##WRITE_TYPE (cpu, vd, i, a[i] * b[i]); \
- } \
- while (0)
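- /* N.B. the macro buffers all of the reads before any of the writes
- so that vd may safely alias vn or vm; it also relies on i, vn, vm,
- vd and bias being in scope at each expansion site. */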
- static void
- do_vec_mull (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = lower(0)/upper(1) selector
- instr[29] = signed(0)/unsigned(1)
- instr[28,24] = 0 1110
- instr[23,22] = size: 8-bit (00), 16-bit (01), 32-bit (10)
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 11 0000
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- int unsign = INSTR (29, 29);
- int bias = INSTR (30, 30);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR ( 9, 5);
- unsigned vd = INSTR ( 4, 0);
- unsigned i;
- NYI_assert (28, 24, 0x0E);
- NYI_assert (15, 10, 0x30);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* NB: Read source values before writing results, in case
- the source and destination vectors are the same. */
- switch (INSTR (23, 22))
- {
- case 0:
- if (bias)
- bias = 8;
- if (unsign)
- DO_VEC_WIDENING_MUL (8, uint16_t, u8, u16);
- else
- DO_VEC_WIDENING_MUL (8, int16_t, s8, s16);
- return;
- case 1:
- if (bias)
- bias = 4;
- if (unsign)
- DO_VEC_WIDENING_MUL (4, uint32_t, u16, u32);
- else
- DO_VEC_WIDENING_MUL (4, int32_t, s16, s32);
- return;
- case 2:
- if (bias)
- bias = 2;
- if (unsign)
- DO_VEC_WIDENING_MUL (2, uint64_t, u32, u64);
- else
- DO_VEC_WIDENING_MUL (2, int64_t, s32, s64);
- return;
- case 3:
- HALT_NYI;
- }
- }
- static void
- do_vec_fadd (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,24] = 001110
- instr[23] = FADD(0)/FSUB(1)
- instr[22] = float (0)/double(1)
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 110101
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x35);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (23, 23))
- {
- if (INSTR (22, 22))
- {
- if (! full)
- HALT_NYI;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_double (cpu, vd, i,
- aarch64_get_vec_double (cpu, vn, i)
- - aarch64_get_vec_double (cpu, vm, i));
- }
- else
- {
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_float (cpu, vd, i,
- aarch64_get_vec_float (cpu, vn, i)
- - aarch64_get_vec_float (cpu, vm, i));
- }
- }
- else
- {
- if (INSTR (22, 22))
- {
- if (! full)
- HALT_NYI;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_double (cpu, vd, i,
- aarch64_get_vec_double (cpu, vm, i)
- + aarch64_get_vec_double (cpu, vn, i));
- }
- else
- {
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_float (cpu, vd, i,
- aarch64_get_vec_float (cpu, vm, i)
- + aarch64_get_vec_float (cpu, vn, i));
- }
- }
- }
- static void
- do_vec_add (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half selector
- instr[29,24] = 001110
- instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit, 11=> 64-bit
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 100001
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x21);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i, aarch64_get_vec_u8 (cpu, vn, i)
- + aarch64_get_vec_u8 (cpu, vm, i));
- return;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_u16 (cpu, vd, i, aarch64_get_vec_u16 (cpu, vn, i)
- + aarch64_get_vec_u16 (cpu, vm, i));
- return;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i, aarch64_get_vec_u32 (cpu, vn, i)
- + aarch64_get_vec_u32 (cpu, vm, i));
- return;
- case 3:
- if (! full)
- HALT_UNALLOC;
- aarch64_set_vec_u64 (cpu, vd, 0, aarch64_get_vec_u64 (cpu, vn, 0)
- + aarch64_get_vec_u64 (cpu, vm, 0));
- aarch64_set_vec_u64 (cpu, vd, 1,
- aarch64_get_vec_u64 (cpu, vn, 1)
- + aarch64_get_vec_u64 (cpu, vm, 1));
- return;
- }
- }
- static void
- do_vec_mul (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half selector
- instr[29,24] = 00 1110
- instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 10 0111
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- int bias = 0;
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x27);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- DO_VEC_WIDENING_MUL (full ? 16 : 8, uint8_t, u8, u8);
- return;
- case 1:
- DO_VEC_WIDENING_MUL (full ? 8 : 4, uint16_t, u16, u16);
- return;
- case 2:
- DO_VEC_WIDENING_MUL (full ? 4 : 2, uint32_t, u32, u32);
- return;
- case 3:
- HALT_UNALLOC;
- }
- }
- static void
- do_vec_MLA (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half selector
- instr[29,24] = 00 1110
- instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 1001 01
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x25);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i,
- aarch64_get_vec_u8 (cpu, vd, i)
- + (aarch64_get_vec_u8 (cpu, vn, i)
- * aarch64_get_vec_u8 (cpu, vm, i)));
- return;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_u16 (cpu, vd, i,
- aarch64_get_vec_u16 (cpu, vd, i)
- + (aarch64_get_vec_u16 (cpu, vn, i)
- * aarch64_get_vec_u16 (cpu, vm, i)));
- return;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i,
- aarch64_get_vec_u32 (cpu, vd, i)
- + (aarch64_get_vec_u32 (cpu, vn, i)
- * aarch64_get_vec_u32 (cpu, vm, i)));
- return;
- default:
- HALT_UNALLOC;
- }
- }
- static float
- fmaxnm (float a, float b)
- {
- if (! isnan (a))
- {
- if (! isnan (b))
- return a > b ? a : b;
- return a;
- }
- else if (! isnan (b))
- return b;
- return a;
- }
- static float
- fminnm (float a, float b)
- {
- if (! isnan (a))
- {
- if (! isnan (b))
- return a < b ? a : b;
- return a;
- }
- else if (! isnan (b))
- return b;
- return a;
- }
- static double
- dmaxnm (double a, double b)
- {
- if (! isnan (a))
- {
- if (! isnan (b))
- return a > b ? a : b;
- return a;
- }
- else if (! isnan (b))
- return b;
- return a;
- }
- static double
- dminnm (double a, double b)
- {
- if (! isnan (a))
- {
- if (! isnan (b))
- return a < b ? a : b;
- return a;
- }
- else if (! isnan (b))
- return b;
- return a;
- }
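- /* These four helpers follow the IEEE 754-2008 minNum/maxNum rules
- used by FMINNM/FMAXNM: a NaN in one operand loses to a number in
- the other, and a NaN results only when both operands are NaNs. */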
- static void
- do_vec_FminmaxNMP (sim_cpu *cpu)
- {
- /* instr [31] = 0
- instr [30] = half (0)/full (1)
- instr [29,24] = 10 1110
- instr [23] = max(0)/min(1)
- instr [22] = float (0)/double (1)
- instr [21] = 1
- instr [20,16] = Vm
- instr [15,10] = 1100 01
- instr [9,5] = Vn
- instr [4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- int full = INSTR (30, 30);
- NYI_assert (29, 24, 0x2E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x31);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- double (* fn)(double, double) = INSTR (23, 23)
- ? dminnm : dmaxnm;
- if (! full)
- HALT_NYI;
- aarch64_set_vec_double (cpu, vd, 0,
- fn (aarch64_get_vec_double (cpu, vn, 0),
- aarch64_get_vec_double (cpu, vn, 1)));
- aarch64_set_vec_double (cpu, vd, 1,
- fn (aarch64_get_vec_double (cpu, vm, 0),
- aarch64_get_vec_double (cpu, vm, 1)));
- }
- else
- {
- float (* fn)(float, float) = INSTR (23, 23)
- ? fminnm : fmaxnm;
- aarch64_set_vec_float (cpu, vd, 0,
- fn (aarch64_get_vec_float (cpu, vn, 0),
- aarch64_get_vec_float (cpu, vn, 1)));
- if (full)
- aarch64_set_vec_float (cpu, vd, 1,
- fn (aarch64_get_vec_float (cpu, vn, 2),
- aarch64_get_vec_float (cpu, vn, 3)));
- aarch64_set_vec_float (cpu, vd, (full ? 2 : 1),
- fn (aarch64_get_vec_float (cpu, vm, 0),
- aarch64_get_vec_float (cpu, vm, 1)));
- if (full)
- aarch64_set_vec_float (cpu, vd, 3,
- fn (aarch64_get_vec_float (cpu, vm, 2),
- aarch64_get_vec_float (cpu, vm, 3)));
- }
- }
- static void
- do_vec_AND (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0)/full (1)
- instr[29,21] = 001110001
- instr[20,16] = Vm
- instr[15,10] = 000111
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (29, 21, 0x071);
- NYI_assert (15, 10, 0x07);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i,
- aarch64_get_vec_u32 (cpu, vn, i)
- & aarch64_get_vec_u32 (cpu, vm, i));
- }
- static void
- do_vec_BSL (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0)/full (1)
- instr[29,21] = 101110011
- instr[20,16] = Vm
- instr[15,10] = 000111
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (29, 21, 0x173);
- NYI_assert (15, 10, 0x07);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i,
- ( aarch64_get_vec_u8 (cpu, vd, i)
- & aarch64_get_vec_u8 (cpu, vn, i))
- | ((~ aarch64_get_vec_u8 (cpu, vd, i))
- & aarch64_get_vec_u8 (cpu, vm, i)));
- }
- static void
- do_vec_EOR (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0)/full (1)
- instr[29,21] = 10 1110 001
- instr[20,16] = Vm
- instr[15,10] = 000111
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (29, 21, 0x171);
- NYI_assert (15, 10, 0x07);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i,
- aarch64_get_vec_u32 (cpu, vn, i)
- ^ aarch64_get_vec_u32 (cpu, vm, i));
- }
- static void
- do_vec_bit (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0)/full (1)
- instr[29,23] = 10 1110 1
- instr[22] = BIT (0) / BIF (1)
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 0001 11
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned full = INSTR (30, 30);
- unsigned test_false = INSTR (22, 22);
- unsigned i;
- NYI_assert (29, 23, 0x5D);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x07);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- for (i = 0; i < (full ? 4 : 2); i++)
- {
- uint32_t vd_val = aarch64_get_vec_u32 (cpu, vd, i);
- uint32_t vn_val = aarch64_get_vec_u32 (cpu, vn, i);
- uint32_t vm_val = aarch64_get_vec_u32 (cpu, vm, i);
- if (test_false)
- aarch64_set_vec_u32 (cpu, vd, i,
- (vd_val & vm_val) | (vn_val & ~vm_val));
- else
- aarch64_set_vec_u32 (cpu, vd, i,
- (vd_val & ~vm_val) | (vn_val & vm_val));
- }
- }
- static void
- do_vec_ORN (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0)/full (1)
- instr[29,21] = 00 1110 111
- instr[20,16] = Vm
- instr[15,10] = 00 0111
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (29, 21, 0x077);
- NYI_assert (15, 10, 0x07);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i,
- aarch64_get_vec_u8 (cpu, vn, i)
- | ~ aarch64_get_vec_u8 (cpu, vm, i));
- }
- static void
- do_vec_ORR (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0)/full (1)
- instr[29,21] = 00 1110 101
- instr[20,16] = Vm
- instr[15,10] = 0001 11
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (29, 21, 0x075);
- NYI_assert (15, 10, 0x07);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i,
- aarch64_get_vec_u8 (cpu, vn, i)
- | aarch64_get_vec_u8 (cpu, vm, i));
- }
- static void
- do_vec_BIC (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0)/full (1)
- instr[29,21] = 00 1110 011
- instr[20,16] = Vm
- instr[15,10] = 00 0111
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (29, 21, 0x073);
- NYI_assert (15, 10, 0x07);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i,
- aarch64_get_vec_u8 (cpu, vn, i)
- & ~ aarch64_get_vec_u8 (cpu, vm, i));
- }
- static void
- do_vec_XTN (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = first part (0)/ second part (1)
- instr[29,24] = 00 1110
- instr[23,22] = size: byte(00), half(01), word (10)
- instr[21,10] = 1000 0100 1010
- instr[9,5] = Vs
- instr[4,0] = Vd. */
- unsigned vs = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned bias = INSTR (30, 30);
- unsigned i;
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 10, 0x84A);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < 8; i++)
- aarch64_set_vec_u8 (cpu, vd, i + (bias * 8),
- aarch64_get_vec_u16 (cpu, vs, i));
- return;
- case 1:
- for (i = 0; i < 4; i++)
- aarch64_set_vec_u16 (cpu, vd, i + (bias * 4),
- aarch64_get_vec_u32 (cpu, vs, i));
- return;
- case 2:
- for (i = 0; i < 2; i++)
- aarch64_set_vec_u32 (cpu, vd, i + (bias * 2),
- aarch64_get_vec_u64 (cpu, vs, i));
- return;
- }
- }
- /* Return the number of bits set in the input value. */
- #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
- # define popcount __builtin_popcount
- #else
- static int
- popcount (unsigned char x)
- {
- static const unsigned char popcnt[16] =
- {
- 0, 1, 1, 2,
- 1, 2, 2, 3,
- 1, 2, 2, 3,
- 2, 3, 3, 4
- };
- /* Only counts the low 8 bits of the input as that is all we need. */
- return popcnt[x % 16] + popcnt[x / 16];
- }
- #endif
- static void
- do_vec_CNT (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0)/ full (1)
- instr[29,24] = 00 1110
- instr[23,22] = size: byte(00)
- instr[21,10] = 1000 0001 0110
- instr[9,5] = Vs
- instr[4,0] = Vd. */
- unsigned vs = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- int full = INSTR (30, 30);
- int size = INSTR (23, 22);
- int i;
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 10, 0x816);
- if (size != 0)
- HALT_UNALLOC;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i,
- popcount (aarch64_get_vec_u8 (cpu, vs, i)));
- }
- static void
- do_vec_maxv (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29] = signed (0)/unsigned(1)
- instr[28,24] = 0 1110
- instr[23,22] = size: byte(00), half(01), word (10)
- instr[21] = 1
- instr[20,17] = 1 000
- instr[16] = max(0)/min(1)
- instr[15,10] = 1010 10
- instr[9,5] = V source
- instr[4,0] = R dest. */
- unsigned vs = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned full = INSTR (30, 30);
- unsigned i;
- NYI_assert (28, 24, 0x0E);
- NYI_assert (21, 21, 1);
- NYI_assert (20, 17, 8);
- NYI_assert (15, 10, 0x2A);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch ((INSTR (29, 29) << 1) | INSTR (16, 16))
- {
- case 0: /* SMAXV. */
- {
- int64_t smax;
- switch (INSTR (23, 22))
- {
- case 0:
- smax = aarch64_get_vec_s8 (cpu, vs, 0);
- for (i = 1; i < (full ? 16 : 8); i++)
- smax = max (smax, aarch64_get_vec_s8 (cpu, vs, i));
- break;
- case 1:
- smax = aarch64_get_vec_s16 (cpu, vs, 0);
- for (i = 1; i < (full ? 8 : 4); i++)
- smax = max (smax, aarch64_get_vec_s16 (cpu, vs, i));
- break;
- case 2:
- smax = aarch64_get_vec_s32 (cpu, vs, 0);
- for (i = 1; i < (full ? 4 : 2); i++)
- smax = max (smax, aarch64_get_vec_s32 (cpu, vs, i));
- break;
- case 3:
- HALT_UNALLOC;
- }
- aarch64_set_reg_s64 (cpu, rd, NO_SP, smax);
- return;
- }
- case 1: /* SMINV. */
- {
- int64_t smin;
- switch (INSTR (23, 22))
- {
- case 0:
- smin = aarch64_get_vec_s8 (cpu, vs, 0);
- for (i = 1; i < (full ? 16 : 8); i++)
- smin = min (smin, aarch64_get_vec_s8 (cpu, vs, i));
- break;
- case 1:
- smin = aarch64_get_vec_s16 (cpu, vs, 0);
- for (i = 1; i < (full ? 8 : 4); i++)
- smin = min (smin, aarch64_get_vec_s16 (cpu, vs, i));
- break;
- case 2:
- smin = aarch64_get_vec_s32 (cpu, vs, 0);
- for (i = 1; i < (full ? 4 : 2); i++)
- smin = min (smin, aarch64_get_vec_s32 (cpu, vs, i));
- break;
- case 3:
- HALT_UNALLOC;
- }
- aarch64_set_reg_s64 (cpu, rd, NO_SP, smin);
- return;
- }
- case 2: /* UMAXV. */
- {
- uint64_t umax;
- switch (INSTR (23, 22))
- {
- case 0:
- umax = aarch64_get_vec_u8 (cpu, vs, 0);
- for (i = 1; i < (full ? 16 : 8); i++)
- umax = max (umax, aarch64_get_vec_u8 (cpu, vs, i));
- break;
- case 1:
- umax = aarch64_get_vec_u16 (cpu, vs, 0);
- for (i = 1; i < (full ? 8 : 4); i++)
- umax = max (umax, aarch64_get_vec_u16 (cpu, vs, i));
- break;
- case 2:
- umax = aarch64_get_vec_u32 (cpu, vs, 0);
- for (i = 1; i < (full ? 4 : 2); i++)
- umax = max (umax, aarch64_get_vec_u32 (cpu, vs, i));
- break;
- case 3:
- HALT_UNALLOC;
- }
- aarch64_set_reg_u64 (cpu, rd, NO_SP, umax);
- return;
- }
- case 3: /* UMINV. */
- {
- uint64_t umin;
- switch (INSTR (23, 22))
- {
- case 0:
- umin = aarch64_get_vec_u8 (cpu, vs, 0);
- for (i = 1; i < (full ? 16 : 8); i++)
- umin = min (umin, aarch64_get_vec_u8 (cpu, vs, i));
- break;
- case 1:
- umin = aarch64_get_vec_u16 (cpu, vs, 0);
- for (i = 1; i < (full ? 8 : 4); i++)
- umin = min (umin, aarch64_get_vec_u16 (cpu, vs, i));
- break;
- case 2:
- umin = aarch64_get_vec_u32 (cpu, vs, 0);
- for (i = 1; i < (full ? 4 : 2); i++)
- umin = min (umin, aarch64_get_vec_u32 (cpu, vs, i));
- break;
- case 3:
- HALT_UNALLOC;
- }
- aarch64_set_reg_u64 (cpu, rd, NO_SP, umin);
- return;
- }
- }
- }
- static void
- do_vec_fminmaxV (sim_cpu *cpu)
- {
- /* instr[31,24] = 0110 1110
- instr[23] = max(0)/min(1)
- instr[22,14] = 011 0000 11
- instr[13,12] = nm(00)/normal(11)
- instr[11,10] = 10
- instr[9,5] = V source
- instr[4,0] = R dest. */
- unsigned vs = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned i;
- float res = aarch64_get_vec_float (cpu, vs, 0);
- NYI_assert (31, 24, 0x6E);
- NYI_assert (22, 14, 0x0C3);
- NYI_assert (11, 10, 2);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
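- /* Only the single-precision, full-width (4S) form is handled here. */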
- if (INSTR (23, 23))
- {
- switch (INSTR (13, 12))
- {
- case 0: /* FMINNMV. */
- for (i = 1; i < 4; i++)
- res = fminnm (res, aarch64_get_vec_float (cpu, vs, i));
- break;
- case 3: /* FMINV. */
- for (i = 1; i < 4; i++)
- res = min (res, aarch64_get_vec_float (cpu, vs, i));
- break;
- default:
- HALT_NYI;
- }
- }
- else
- {
- switch (INSTR (13, 12))
- {
- case 0: /* FMAXNMV. */
- for (i = 1; i < 4; i++)
- res = fmaxnm (res, aarch64_get_vec_float (cpu, vs, i));
- break;
- case 3: /* FMAXV. */
- for (i = 1; i < 4; i++)
- res = max (res, aarch64_get_vec_float (cpu, vs, i));
- break;
- default:
- HALT_NYI;
- }
- }
- aarch64_set_FP_float (cpu, rd, res);
- }
- static void
- do_vec_Fminmax (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,24] = 00 1110
- instr[23] = max(0)/min(1)
- instr[22] = float(0)/double(1)
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,14] = 11
- instr[13,12] = nm(00)/normal(11)
- instr[11,10] = 01
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned full = INSTR (30, 30);
- unsigned min = INSTR (23, 23);
- unsigned i;
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 14, 3);
- NYI_assert (11, 10, 1);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- double (* func)(double, double);
- if (! full)
- HALT_NYI;
- if (INSTR (13, 12) == 0)
- func = min ? dminnm : dmaxnm;
- else if (INSTR (13, 12) == 3)
- func = min ? fmin : fmax;
- else
- HALT_NYI;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_double (cpu, vd, i,
- func (aarch64_get_vec_double (cpu, vn, i),
- aarch64_get_vec_double (cpu, vm, i)));
- }
- else
- {
- float (* func)(float, float);
- if (INSTR (13, 12) == 0)
- func = min ? fminnm : fmaxnm;
- else if (INSTR (13, 12) == 3)
- func = min ? fminf : fmaxf;
- else
- HALT_NYI;
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_float (cpu, vd, i,
- func (aarch64_get_vec_float (cpu, vn, i),
- aarch64_get_vec_float (cpu, vm, i)));
- }
- }
- static void
- do_vec_SCVTF (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = Q
- instr[29,23] = 00 1110 0
- instr[22] = float(0)/double(1)
- instr[21,10] = 10 0001 1101 10
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned full = INSTR (30, 30);
- unsigned size = INSTR (22, 22);
- unsigned i;
- NYI_assert (29, 23, 0x1C);
- NYI_assert (21, 10, 0x876);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (size)
- {
- if (! full)
- HALT_UNALLOC;
- for (i = 0; i < 2; i++)
- {
- double val = (double) aarch64_get_vec_u64 (cpu, vn, i);
- aarch64_set_vec_double (cpu, vd, i, val);
- }
- }
- else
- {
- for (i = 0; i < (full ? 4 : 2); i++)
- {
- float val = (float) aarch64_get_vec_u32 (cpu, vn, i);
- aarch64_set_vec_float (cpu, vd, i, val);
- }
- }
- }
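- /* Helper macros for the vector compare functions below. Each lane
- of Vd is written with all ones if the comparison holds for that
- lane and with all zeros otherwise. */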
- #define VEC_CMP(SOURCE, CMP) \
- do \
- { \
- switch (size) \
- { \
- case 0: \
- for (i = 0; i < (full ? 16 : 8); i++) \
- aarch64_set_vec_u8 (cpu, vd, i, \
- aarch64_get_vec_##SOURCE##8 (cpu, vn, i) \
- CMP \
- aarch64_get_vec_##SOURCE##8 (cpu, vm, i) \
- ? -1 : 0); \
- return; \
- case 1: \
- for (i = 0; i < (full ? 8 : 4); i++) \
- aarch64_set_vec_u16 (cpu, vd, i, \
- aarch64_get_vec_##SOURCE##16 (cpu, vn, i) \
- CMP \
- aarch64_get_vec_##SOURCE##16 (cpu, vm, i) \
- ? -1 : 0); \
- return; \
- case 2: \
- for (i = 0; i < (full ? 4 : 2); i++) \
- aarch64_set_vec_u32 (cpu, vd, i, \
- aarch64_get_vec_##SOURCE##32 (cpu, vn, i) \
- CMP \
- aarch64_get_vec_##SOURCE##32 (cpu, vm, i) \
- ? -1 : 0); \
- return; \
- case 3: \
- if (! full) \
- HALT_UNALLOC; \
- for (i = 0; i < 2; i++) \
- aarch64_set_vec_u64 (cpu, vd, i, \
- aarch64_get_vec_##SOURCE##64 (cpu, vn, i) \
- CMP \
- aarch64_get_vec_##SOURCE##64 (cpu, vm, i) \
- ? -1ULL : 0); \
- return; \
- } \
- } \
- while (0)
- #define VEC_CMP0(SOURCE, CMP) \
- do \
- { \
- switch (size) \
- { \
- case 0: \
- for (i = 0; i < (full ? 16 : 8); i++) \
- aarch64_set_vec_u8 (cpu, vd, i, \
- aarch64_get_vec_##SOURCE##8 (cpu, vn, i) \
- CMP 0 ? -1 : 0); \
- return; \
- case 1: \
- for (i = 0; i < (full ? 8 : 4); i++) \
- aarch64_set_vec_u16 (cpu, vd, i, \
- aarch64_get_vec_##SOURCE##16 (cpu, vn, i) \
- CMP 0 ? -1 : 0); \
- return; \
- case 2: \
- for (i = 0; i < (full ? 4 : 2); i++) \
- aarch64_set_vec_u32 (cpu, vd, i, \
- aarch64_get_vec_##SOURCE##32 (cpu, vn, i) \
- CMP 0 ? -1 : 0); \
- return; \
- case 3: \
- if (! full) \
- HALT_UNALLOC; \
- for (i = 0; i < 2; i++) \
- aarch64_set_vec_u64 (cpu, vd, i, \
- aarch64_get_vec_##SOURCE##64 (cpu, vn, i) \
- CMP 0 ? -1ULL : 0); \
- return; \
- } \
- } \
- while (0)
- #define VEC_FCMP0(CMP) \
- do \
- { \
- if (vm != 0) \
- HALT_NYI; \
- if (INSTR (22, 22)) \
- { \
- if (! full) \
- HALT_NYI; \
- for (i = 0; i < 2; i++) \
- aarch64_set_vec_u64 (cpu, vd, i, \
- aarch64_get_vec_double (cpu, vn, i) \
- CMP 0.0 ? -1 : 0); \
- } \
- else \
- { \
- for (i = 0; i < (full ? 4 : 2); i++) \
- aarch64_set_vec_u32 (cpu, vd, i, \
- aarch64_get_vec_float (cpu, vn, i) \
- CMP 0.0 ? -1 : 0); \
- } \
- return; \
- } \
- while (0)
- #define VEC_FCMP(CMP) \
- do \
- { \
- if (INSTR (22, 22)) \
- { \
- if (! full) \
- HALT_NYI; \
- for (i = 0; i < 2; i++) \
- aarch64_set_vec_u64 (cpu, vd, i, \
- aarch64_get_vec_double (cpu, vn, i) \
- CMP \
- aarch64_get_vec_double (cpu, vm, i) \
- ? -1 : 0); \
- } \
- else \
- { \
- for (i = 0; i < (full ? 4 : 2); i++) \
- aarch64_set_vec_u32 (cpu, vd, i, \
- aarch64_get_vec_float (cpu, vn, i) \
- CMP \
- aarch64_get_vec_float (cpu, vm, i) \
- ? -1 : 0); \
- } \
- return; \
- } \
- while (0)
- static void
- do_vec_compare (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29] = part-of-comparison-type
- instr[28,24] = 0 1110
- instr[23,22] = size of integer compares: byte(00), half(01), word (10), long (11)
- type of float compares: single (-0) / double (-1)
- instr[21] = 1
- instr[20,16] = Vm or 00000 (compare vs 0)
- instr[15,10] = part-of-comparison-type
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- int full = INSTR (30, 30);
- int size = INSTR (23, 22);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- NYI_assert (28, 24, 0x0E);
- NYI_assert (21, 21, 1);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if ((INSTR (11, 11)
- && INSTR (14, 14))
- || ((INSTR (11, 11) == 0
- && INSTR (10, 10) == 0)))
- {
- /* A compare vs 0. */
- if (vm != 0)
- {
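- /* Vm is a real register operand, so this is actually one of the
- overlapping non-compare encodings; dispatch on the sub-opcode. */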
- if (INSTR (15, 10) == 0x2A)
- do_vec_maxv (cpu);
- else if (INSTR (15, 10) == 0x32
- || INSTR (15, 10) == 0x3E)
- do_vec_fminmaxV (cpu);
- else if (INSTR (29, 23) == 0x1C
- && INSTR (21, 10) == 0x876)
- do_vec_SCVTF (cpu);
- else
- HALT_NYI;
- return;
- }
- }
- if (INSTR (14, 14))
- {
- /* A floating point compare. */
- unsigned decode = (INSTR (29, 29) << 5) | (INSTR (23, 23) << 4)
- | INSTR (13, 10);
- NYI_assert (15, 15, 1);
- switch (decode)
- {
- case /* 0b010010: GT#0 */ 0x12: VEC_FCMP0 (>);
- case /* 0b110010: GE#0 */ 0x32: VEC_FCMP0 (>=);
- case /* 0b010110: EQ#0 */ 0x16: VEC_FCMP0 (==);
- case /* 0b110110: LE#0 */ 0x36: VEC_FCMP0 (<=);
- case /* 0b011010: LT#0 */ 0x1A: VEC_FCMP0 (<);
- case /* 0b111001: GT */ 0x39: VEC_FCMP (>);
- case /* 0b101001: GE */ 0x29: VEC_FCMP (>=);
- case /* 0b001001: EQ */ 0x09: VEC_FCMP (==);
- default:
- HALT_NYI;
- }
- }
- else
- {
- unsigned decode = (INSTR (29, 29) << 6) | INSTR (15, 10);
- switch (decode)
- {
- case 0x0D: /* 0001101 GT */ VEC_CMP (s, > );
- case 0x0F: /* 0001111 GE */ VEC_CMP (s, >= );
- case 0x22: /* 0100010 GT #0 */ VEC_CMP0 (s, > );
- case 0x23: /* 0100011 TST */ VEC_CMP (u, & );
- case 0x26: /* 0100110 EQ #0 */ VEC_CMP0 (s, == );
- case 0x2A: /* 0101010 LT #0 */ VEC_CMP0 (s, < );
- case 0x4D: /* 1001101 HI */ VEC_CMP (u, > );
- case 0x4F: /* 1001111 HS */ VEC_CMP (u, >= );
- case 0x62: /* 1100010 GE #0 */ VEC_CMP0 (s, >= );
- case 0x63: /* 1100011 EQ */ VEC_CMP (u, == );
- case 0x66: /* 1100110 LE #0 */ VEC_CMP0 (s, <= );
- default:
- if (vm == 0)
- HALT_NYI;
- do_vec_maxv (cpu);
- }
- }
- }
- static void
- do_vec_SSHL (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = first part (0)/ second part (1)
- instr[29,24] = 00 1110
- instr[23,22] = size: byte(00), half(01), word (10), long (11)
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 0100 01
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned full = INSTR (30, 30);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- signed int shift;
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x11);
- /* FIXME: What is a signed shift left in this context?  */
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- {
- shift = aarch64_get_vec_s8 (cpu, vm, i);
- if (shift >= 0)
- aarch64_set_vec_s8 (cpu, vd, i, aarch64_get_vec_s8 (cpu, vn, i)
- << shift);
- else
- aarch64_set_vec_s8 (cpu, vd, i, aarch64_get_vec_s8 (cpu, vn, i)
- >> - shift);
- }
- return;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- {
- shift = aarch64_get_vec_s8 (cpu, vm, i * 2);
- if (shift >= 0)
- aarch64_set_vec_s16 (cpu, vd, i, aarch64_get_vec_s16 (cpu, vn, i)
- << shift);
- else
- aarch64_set_vec_s16 (cpu, vd, i, aarch64_get_vec_s16 (cpu, vn, i)
- >> - shift);
- }
- return;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- {
- shift = aarch64_get_vec_s8 (cpu, vm, i * 4);
- if (shift >= 0)
- aarch64_set_vec_s32 (cpu, vd, i, aarch64_get_vec_s32 (cpu, vn, i)
- << shift);
- else
- aarch64_set_vec_s32 (cpu, vd, i, aarch64_get_vec_s32 (cpu, vn, i)
- >> - shift);
- }
- return;
- case 3:
- if (! full)
- HALT_UNALLOC;
- for (i = 0; i < 2; i++)
- {
- shift = aarch64_get_vec_s8 (cpu, vm, i * 8);
- if (shift >= 0)
- aarch64_set_vec_s64 (cpu, vd, i, aarch64_get_vec_s64 (cpu, vn, i)
- << shift);
- else
- aarch64_set_vec_s64 (cpu, vd, i, aarch64_get_vec_s64 (cpu, vn, i)
- >> - shift);
- }
- return;
- }
- }
- static void
- do_vec_USHL (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = first part (0)/ second part (1)
- instr[29,24] = 10 1110
- instr[23,22] = size: byte(00), half(01), word (10), long (11)
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 0100 01
- instr[9,5] = Vn
- instr[4,0] = Vd */
- unsigned full = INSTR (30, 30);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- signed int shift;
- NYI_assert (29, 24, 0x2E);
- NYI_assert (15, 10, 0x11);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- {
- shift = aarch64_get_vec_s8 (cpu, vm, i);
- if (shift >= 0)
- aarch64_set_vec_u8 (cpu, vd, i, aarch64_get_vec_u8 (cpu, vn, i)
- << shift);
- else
- aarch64_set_vec_u8 (cpu, vd, i, aarch64_get_vec_u8 (cpu, vn, i)
- >> - shift);
- }
- return;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- {
- shift = aarch64_get_vec_s8 (cpu, vm, i * 2);
- if (shift >= 0)
- aarch64_set_vec_u16 (cpu, vd, i, aarch64_get_vec_u16 (cpu, vn, i)
- << shift);
- else
- aarch64_set_vec_u16 (cpu, vd, i, aarch64_get_vec_u16 (cpu, vn, i)
- >> - shift);
- }
- return;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- {
- shift = aarch64_get_vec_s8 (cpu, vm, i * 4);
- if (shift >= 0)
- aarch64_set_vec_u32 (cpu, vd, i, aarch64_get_vec_u32 (cpu, vn, i)
- << shift);
- else
- aarch64_set_vec_u32 (cpu, vd, i, aarch64_get_vec_u32 (cpu, vn, i)
- >> - shift);
- }
- return;
- case 3:
- if (! full)
- HALT_UNALLOC;
- for (i = 0; i < 2; i++)
- {
- shift = aarch64_get_vec_s8 (cpu, vm, i * 8);
- if (shift >= 0)
- aarch64_set_vec_u64 (cpu, vd, i, aarch64_get_vec_u64 (cpu, vn, i)
- << shift);
- else
- aarch64_set_vec_u64 (cpu, vd, i, aarch64_get_vec_u64 (cpu, vn, i)
- >> - shift);
- }
- return;
- }
- }
- static void
- do_vec_FMLA (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half selector
- instr[29,23] = 0011100
- instr[22] = size: 0=>float, 1=>double
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 1100 11
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (29, 23, 0x1C);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x33);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- if (! full)
- HALT_UNALLOC;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_double (cpu, vd, i,
- aarch64_get_vec_double (cpu, vn, i) *
- aarch64_get_vec_double (cpu, vm, i) +
- aarch64_get_vec_double (cpu, vd, i));
- }
- else
- {
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_float (cpu, vd, i,
- aarch64_get_vec_float (cpu, vn, i) *
- aarch64_get_vec_float (cpu, vm, i) +
- aarch64_get_vec_float (cpu, vd, i));
- }
- }
- static void
- do_vec_max (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half selector
- instr[29] = SMAX (0) / UMAX (1)
- instr[28,24] = 0 1110
- instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 0110 01
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (28, 24, 0x0E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x19);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (29, 29))
- {
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i,
- aarch64_get_vec_u8 (cpu, vn, i)
- > aarch64_get_vec_u8 (cpu, vm, i)
- ? aarch64_get_vec_u8 (cpu, vn, i)
- : aarch64_get_vec_u8 (cpu, vm, i));
- return;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_u16 (cpu, vd, i,
- aarch64_get_vec_u16 (cpu, vn, i)
- > aarch64_get_vec_u16 (cpu, vm, i)
- ? aarch64_get_vec_u16 (cpu, vn, i)
- : aarch64_get_vec_u16 (cpu, vm, i));
- return;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i,
- aarch64_get_vec_u32 (cpu, vn, i)
- > aarch64_get_vec_u32 (cpu, vm, i)
- ? aarch64_get_vec_u32 (cpu, vn, i)
- : aarch64_get_vec_u32 (cpu, vm, i));
- return;
- case 3:
- HALT_UNALLOC;
- }
- }
- else
- {
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_s8 (cpu, vd, i,
- aarch64_get_vec_s8 (cpu, vn, i)
- > aarch64_get_vec_s8 (cpu, vm, i)
- ? aarch64_get_vec_s8 (cpu, vn, i)
- : aarch64_get_vec_s8 (cpu, vm, i));
- return;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_s16 (cpu, vd, i,
- aarch64_get_vec_s16 (cpu, vn, i)
- > aarch64_get_vec_s16 (cpu, vm, i)
- ? aarch64_get_vec_s16 (cpu, vn, i)
- : aarch64_get_vec_s16 (cpu, vm, i));
- return;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_s32 (cpu, vd, i,
- aarch64_get_vec_s32 (cpu, vn, i)
- > aarch64_get_vec_s32 (cpu, vm, i)
- ? aarch64_get_vec_s32 (cpu, vn, i)
- : aarch64_get_vec_s32 (cpu, vm, i));
- return;
- case 3:
- HALT_UNALLOC;
- }
- }
- }
- static void
- do_vec_min (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half selector
- instr[29] = SMIN (0) / UMIN (1)
- instr[28,24] = 0 1110
- instr[23,22] = size: 00=> 8-bit, 01=> 16-bit, 10=> 32-bit
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 0110 11
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (28, 24, 0x0E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x1B);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (29, 29))
- {
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i,
- aarch64_get_vec_u8 (cpu, vn, i)
- < aarch64_get_vec_u8 (cpu, vm, i)
- ? aarch64_get_vec_u8 (cpu, vn, i)
- : aarch64_get_vec_u8 (cpu, vm, i));
- return;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_u16 (cpu, vd, i,
- aarch64_get_vec_u16 (cpu, vn, i)
- < aarch64_get_vec_u16 (cpu, vm, i)
- ? aarch64_get_vec_u16 (cpu, vn, i)
- : aarch64_get_vec_u16 (cpu, vm, i));
- return;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i,
- aarch64_get_vec_u32 (cpu, vn, i)
- < aarch64_get_vec_u32 (cpu, vm, i)
- ? aarch64_get_vec_u32 (cpu, vn, i)
- : aarch64_get_vec_u32 (cpu, vm, i));
- return;
- case 3:
- HALT_UNALLOC;
- }
- }
- else
- {
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_s8 (cpu, vd, i,
- aarch64_get_vec_s8 (cpu, vn, i)
- < aarch64_get_vec_s8 (cpu, vm, i)
- ? aarch64_get_vec_s8 (cpu, vn, i)
- : aarch64_get_vec_s8 (cpu, vm, i));
- return;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_s16 (cpu, vd, i,
- aarch64_get_vec_s16 (cpu, vn, i)
- < aarch64_get_vec_s16 (cpu, vm, i)
- ? aarch64_get_vec_s16 (cpu, vn, i)
- : aarch64_get_vec_s16 (cpu, vm, i));
- return;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_s32 (cpu, vd, i,
- aarch64_get_vec_s32 (cpu, vn, i)
- < aarch64_get_vec_s32 (cpu, vm, i)
- ? aarch64_get_vec_s32 (cpu, vn, i)
- : aarch64_get_vec_s32 (cpu, vm, i));
- return;
- case 3:
- HALT_UNALLOC;
- }
- }
- }
- static void
- do_vec_sub_long (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = lower (0) / upper (1)
- instr[29] = signed (0) / unsigned (1)
- instr[28,24] = 0 1110
- instr[23,22] = size: bytes (00), half (01), word (10)
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 0010 00
- instr[9,5] = Vn
- instr[4,0] = V dest. */
- unsigned size = INSTR (23, 22);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned bias = 0;
- unsigned i;
- NYI_assert (28, 24, 0x0E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x08);
- if (size == 3)
- HALT_UNALLOC;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (30, 29))
- {
- case 2: /* SSUBL2. */
- bias = 2;
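- /* Fall through.  */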
- case 0: /* SSUBL. */
- switch (size)
- {
- case 0:
- bias *= 4; /* The upper half of the byte vector starts at element 8. */
- for (i = 0; i < 8; i++)
- aarch64_set_vec_s16 (cpu, vd, i,
- aarch64_get_vec_s8 (cpu, vn, i + bias)
- - aarch64_get_vec_s8 (cpu, vm, i + bias));
- break;
- case 1:
- bias *= 2;
- for (i = 0; i < 4; i++)
- aarch64_set_vec_s32 (cpu, vd, i,
- aarch64_get_vec_s16 (cpu, vn, i + bias)
- - aarch64_get_vec_s16 (cpu, vm, i + bias));
- break;
- case 2:
- for (i = 0; i < 2; i++)
- aarch64_set_vec_s64 (cpu, vd, i,
- aarch64_get_vec_s32 (cpu, vn, i + bias)
- - aarch64_get_vec_s32 (cpu, vm, i + bias));
- break;
- default:
- HALT_UNALLOC;
- }
- break;
- case 3: /* USUBL2. */
- bias = 2;
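- /* Fall through.  */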
- case 1: /* USUBL. */
- switch (size)
- {
- case 0:
- bias *= 4; /* The upper half of the byte vector starts at element 8. */
- for (i = 0; i < 8; i++)
- aarch64_set_vec_u16 (cpu, vd, i,
- aarch64_get_vec_u8 (cpu, vn, i + bias)
- - aarch64_get_vec_u8 (cpu, vm, i + bias));
- break;
- case 1:
- bias *= 2;
- for (i = 0; i < 4; i++)
- aarch64_set_vec_u32 (cpu, vd, i,
- aarch64_get_vec_u16 (cpu, vn, i + bias)
- - aarch64_get_vec_u16 (cpu, vm, i + bias));
- break;
- case 2:
- for (i = 0; i < 2; i++)
- aarch64_set_vec_u64 (cpu, vd, i,
- aarch64_get_vec_u32 (cpu, vn, i + bias)
- - aarch64_get_vec_u32 (cpu, vm, i + bias));
- break;
- default:
- HALT_UNALLOC;
- }
- break;
- }
- }
- static void
- do_vec_ADDP (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,24] = 00 1110
- instr[23,22] = size: bytes (00), half (01), word (10), long (11)
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,10] = 1011 11
- instr[9,5] = Vn
- instr[4,0] = V dest. */
- FRegister copy_vn;
- FRegister copy_vm;
- unsigned full = INSTR (30, 30);
- unsigned size = INSTR (23, 22);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i, range;
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x2F);
- /* Make copies of the source registers in case vd == vn/vm. */
- copy_vn = cpu->fr[vn];
- copy_vm = cpu->fr[vm];
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (size)
- {
- case 0:
- range = full ? 8 : 4;
- for (i = 0; i < range; i++)
- {
- aarch64_set_vec_u8 (cpu, vd, i,
- copy_vn.b[i * 2] + copy_vn.b[i * 2 + 1]);
- aarch64_set_vec_u8 (cpu, vd, i + range,
- copy_vm.b[i * 2] + copy_vm.b[i * 2 + 1]);
- }
- return;
- case 1:
- range = full ? 4 : 2;
- for (i = 0; i < range; i++)
- {
- aarch64_set_vec_u16 (cpu, vd, i,
- copy_vn.h[i * 2] + copy_vn.h[i * 2 + 1]);
- aarch64_set_vec_u16 (cpu, vd, i + range,
- copy_vm.h[i * 2] + copy_vm.h[i * 2 + 1]);
- }
- return;
- case 2:
- range = full ? 2 : 1;
- for (i = 0; i < range; i++)
- {
- aarch64_set_vec_u32 (cpu, vd, i,
- copy_vn.w[i * 2] + copy_vn.w[i * 2 + 1]);
- aarch64_set_vec_u32 (cpu, vd, i + range,
- copy_vm.w[i * 2] + copy_vm.w[i * 2 + 1]);
- }
- return;
- case 3:
- if (! full)
- HALT_UNALLOC;
- aarch64_set_vec_u64 (cpu, vd, 0, copy_vn.v[0] + copy_vn.v[1]);
- aarch64_set_vec_u64 (cpu, vd, 1, copy_vm.v[0] + copy_vm.v[1]);
- return;
- }
- }
- /* Floating point vector convert to longer precision. */
- static void
- do_vec_FCVTL (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0) / all (1)
- instr[29,23] = 00 1110 0
- instr[22] = single (0) / double (1)
- instr[21,10] = 10 0001 0111 10
- instr[9,5] = Rn
- instr[4,0] = Rd. */
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned full = INSTR (30, 30);
- unsigned i;
- NYI_assert (31, 31, 0);
- NYI_assert (29, 23, 0x1C);
- NYI_assert (21, 10, 0x85E);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- for (i = 0; i < 2; i++)
- aarch64_set_vec_double (cpu, rd, i,
- aarch64_get_vec_float (cpu, rn, i + 2*full));
- }
- else
- {
- HALT_NYI;
- #if 0
- /* TODO: Implement missing half-float support. */
- for (i = 0; i < 4; i++)
- aarch64_set_vec_float (cpu, rd, i,
- aarch64_get_vec_halffloat (cpu, rn, i + 4*full));
- #endif
- }
- }
- static void
- do_vec_FABS (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,23] = 00 1110 1
- instr[22] = float(0)/double(1)
- instr[21,16] = 10 0000
- instr[15,10] = 1111 10
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned full = INSTR (30, 30);
- unsigned i;
- NYI_assert (29, 23, 0x1D);
- NYI_assert (21, 10, 0x83E);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- if (! full)
- HALT_NYI;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_double (cpu, vd, i,
- fabs (aarch64_get_vec_double (cpu, vn, i)));
- }
- else
- {
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_float (cpu, vd, i,
- fabsf (aarch64_get_vec_float (cpu, vn, i)));
- }
- }
- static void
- do_vec_FCVTZS (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0) / all (1)
- instr[29,23] = 00 1110 1
- instr[22] = single (0) / double (1)
- instr[21,10] = 10 0001 1011 10
- instr[9,5] = Rn
- instr[4,0] = Rd. */
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned full = INSTR (30, 30);
- unsigned i;
- NYI_assert (31, 31, 0);
- NYI_assert (29, 23, 0x1D);
- NYI_assert (21, 10, 0x86E);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- if (! full)
- HALT_UNALLOC;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_s64 (cpu, rd, i,
- (int64_t) aarch64_get_vec_double (cpu, rn, i));
- }
- else
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_s32 (cpu, rd, i,
- (int32_t) aarch64_get_vec_float (cpu, rn, i));
- }
- static void
- do_vec_REV64 (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half
- instr[29,24] = 00 1110
- instr[23,22] = size
- instr[21,10] = 10 0000 0000 10
- instr[9,5] = Rn
- instr[4,0] = Rd. */
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned size = INSTR (23, 22);
- unsigned full = INSTR (30, 30);
- unsigned i;
- FRegister val;
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 10, 0x802);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
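- /* XOR-ing the element index reverses the element order within
- each 64-bit group. */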
- switch (size)
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- val.b[i ^ 0x7] = aarch64_get_vec_u8 (cpu, rn, i);
- break;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- val.h[i ^ 0x3] = aarch64_get_vec_u16 (cpu, rn, i);
- break;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- val.w[i ^ 0x1] = aarch64_get_vec_u32 (cpu, rn, i);
- break;
-
- case 3:
- HALT_UNALLOC;
- }
- aarch64_set_vec_u64 (cpu, rd, 0, val.v[0]);
- if (full)
- aarch64_set_vec_u64 (cpu, rd, 1, val.v[1]);
- }
- static void
- do_vec_REV16 (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half
- instr[29,24] = 00 1110
- instr[23,22] = size
- instr[21,10] = 10 0000 0001 10
- instr[9,5] = Rn
- instr[4,0] = Rd. */
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned size = INSTR (23, 22);
- unsigned full = INSTR (30, 30);
- unsigned i;
- FRegister val;
- NYI_assert (29, 24, 0x0E);
- NYI_assert (21, 10, 0x806);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (size)
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- val.b[i ^ 0x1] = aarch64_get_vec_u8 (cpu, rn, i);
- break;
- default:
- HALT_UNALLOC;
- }
- aarch64_set_vec_u64 (cpu, rd, 0, val.v[0]);
- if (full)
- aarch64_set_vec_u64 (cpu, rd, 1, val.v[1]);
- }
- static void
- do_vec_op1 (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half/full
- instr[29,24] = 00 1110
- instr[23,21] = ???
- instr[20,16] = Vm
- instr[15,10] = sub-opcode
- instr[9,5] = Vn
- instr[4,0] = Vd */
- NYI_assert (29, 24, 0x0E);
- if (INSTR (21, 21) == 0)
- {
- if (INSTR (23, 22) == 0)
- {
- if (INSTR (30, 30) == 1
- && INSTR (17, 14) == 0
- && INSTR (12, 10) == 7)
- return do_vec_ins_2 (cpu);
- switch (INSTR (15, 10))
- {
- case 0x01: do_vec_DUP_vector_into_vector (cpu); return;
- case 0x03: do_vec_DUP_scalar_into_vector (cpu); return;
- case 0x07: do_vec_INS (cpu); return;
- case 0x0B: do_vec_SMOV_into_scalar (cpu); return;
- case 0x0F: do_vec_UMOV_into_scalar (cpu); return;
- case 0x00:
- case 0x08:
- case 0x10:
- case 0x18:
- do_vec_TBL (cpu); return;
- case 0x06:
- case 0x16:
- do_vec_UZP (cpu); return;
- case 0x0A: do_vec_TRN (cpu); return;
- case 0x0E:
- case 0x1E:
- do_vec_ZIP (cpu); return;
- default:
- HALT_NYI;
- }
- }
- switch (INSTR (13, 10))
- {
- case 0x6: do_vec_UZP (cpu); return;
- case 0xE: do_vec_ZIP (cpu); return;
- case 0xA: do_vec_TRN (cpu); return;
- default: HALT_NYI;
- }
- }
- switch (INSTR (15, 10))
- {
- case 0x02: do_vec_REV64 (cpu); return;
- case 0x06: do_vec_REV16 (cpu); return;
- case 0x07:
- switch (INSTR (23, 21))
- {
- case 1: do_vec_AND (cpu); return;
- case 3: do_vec_BIC (cpu); return;
- case 5: do_vec_ORR (cpu); return;
- case 7: do_vec_ORN (cpu); return;
- default: HALT_NYI;
- }
- case 0x08: do_vec_sub_long (cpu); return;
- case 0x0a: do_vec_XTN (cpu); return;
- case 0x11: do_vec_SSHL (cpu); return;
- case 0x16: do_vec_CNT (cpu); return;
- case 0x19: do_vec_max (cpu); return;
- case 0x1B: do_vec_min (cpu); return;
- case 0x21: do_vec_add (cpu); return;
- case 0x25: do_vec_MLA (cpu); return;
- case 0x27: do_vec_mul (cpu); return;
- case 0x2F: do_vec_ADDP (cpu); return;
- case 0x30: do_vec_mull (cpu); return;
- case 0x33: do_vec_FMLA (cpu); return;
- case 0x35: do_vec_fadd (cpu); return;
- case 0x1E:
- switch (INSTR (20, 16))
- {
- case 0x01: do_vec_FCVTL (cpu); return;
- default: HALT_NYI;
- }
- case 0x2E:
- switch (INSTR (20, 16))
- {
- case 0x00: do_vec_ABS (cpu); return;
- case 0x01: do_vec_FCVTZS (cpu); return;
- case 0x11: do_vec_ADDV (cpu); return;
- default: HALT_NYI;
- }
- case 0x31:
- case 0x3B:
- do_vec_Fminmax (cpu); return;
- case 0x0D:
- case 0x0F:
- case 0x22:
- case 0x23:
- case 0x26:
- case 0x2A:
- case 0x32:
- case 0x36:
- case 0x39:
- case 0x3A:
- do_vec_compare (cpu); return;
- case 0x3E:
- do_vec_FABS (cpu); return;
- default:
- HALT_NYI;
- }
- }
- static void
- do_vec_xtl (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30,29] = SXTL (00), UXTL (01), SXTL2 (10), UXTL2 (11)
- instr[28,22] = 0 1111 00
- instr[21,16] = size & shift (USHLL, SSHLL, USHLL2, SSHLL2)
- instr[15,10] = 1010 01
- instr[9,5] = V source
- instr[4,0] = V dest. */
- unsigned vs = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i, shift, bias = 0;
- NYI_assert (28, 22, 0x3C);
- NYI_assert (15, 10, 0x29);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
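- /* For the SXTL2/UXTL2 (and SSHLL2/USHLL2) variants the source
- elements come from the upper half of Vs; BIAS is scaled below
- to the element count of each size. */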
- switch (INSTR (30, 29))
- {
- case 2: /* SXTL2, SSHLL2. */
- bias = 2;
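- /* Fall through.  */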
- case 0: /* SXTL, SSHLL. */
- if (INSTR (21, 21))
- {
- int64_t val1, val2;
- shift = INSTR (20, 16);
- /* Get the source values before setting the destination values
- in case the source and destination are the same. */
- val1 = aarch64_get_vec_s32 (cpu, vs, bias) << shift;
- val2 = aarch64_get_vec_s32 (cpu, vs, bias + 1) << shift;
- aarch64_set_vec_s64 (cpu, vd, 0, val1);
- aarch64_set_vec_s64 (cpu, vd, 1, val2);
- }
- else if (INSTR (20, 20))
- {
- int32_t v[4];
- shift = INSTR (19, 16);
- bias *= 2;
- for (i = 0; i < 4; i++)
- v[i] = aarch64_get_vec_s16 (cpu, vs, bias + i) << shift;
- for (i = 0; i < 4; i++)
- aarch64_set_vec_s32 (cpu, vd, i, v[i]);
- }
- else
- {
- int16_t v[8];
- NYI_assert (19, 19, 1);
- shift = INSTR (18, 16);
- bias *= 4;
- for (i = 0; i < 8; i++)
- v[i] = aarch64_get_vec_s8 (cpu, vs, i + bias) << shift;
- for (i = 0; i < 8; i++)
- aarch64_set_vec_s16 (cpu, vd, i, v[i]);
- }
- return;
- case 3: /* UXTL2, USHLL2. */
- bias = 2;
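- /* Fall through.  */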
- case 1: /* UXTL, USHLL. */
- if (INSTR (21, 21))
- {
- uint64_t v1, v2;
- shift = INSTR (20, 16);
- v1 = aarch64_get_vec_u32 (cpu, vs, bias) << shift;
- v2 = aarch64_get_vec_u32 (cpu, vs, bias + 1) << shift;
- aarch64_set_vec_u64 (cpu, vd, 0, v1);
- aarch64_set_vec_u64 (cpu, vd, 1, v2);
- }
- else if (INSTR (20, 20))
- {
- uint32_t v[4];
- shift = INSTR (19, 16);
- bias *= 2;
- for (i = 0; i < 4; i++)
- v[i] = aarch64_get_vec_u16 (cpu, vs, i + bias) << shift;
- for (i = 0; i < 4; i++)
- aarch64_set_vec_u32 (cpu, vd, i, v[i]);
- }
- else
- {
- uint16_t v[8];
- NYI_assert (19, 19, 1);
- shift = INSTR (18, 16);
- bias *= 4;
- for (i = 0; i < 8; i++)
- v[i] = aarch64_get_vec_u8 (cpu, vs, i + bias) << shift;
- for (i = 0; i < 8; i++)
- aarch64_set_vec_u16 (cpu, vd, i, v[i]);
- }
- return;
- }
- }
- static void
- do_vec_SHL (sim_cpu *cpu)
- {
- /* instr [31] = 0
- instr [30] = half(0)/full(1)
- instr [29,23] = 001 1110
- instr [22,16] = size and shift amount
- instr [15,10] = 01 0101
- instr [9, 5] = Vs
- instr [4, 0] = Vd. */
- int shift;
- int full = INSTR (30, 30);
- unsigned vs = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- NYI_assert (29, 23, 0x1E);
- NYI_assert (15, 10, 0x15);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
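- /* Bits [22,16] hold immh:immb. The position of the most significant
- set bit of immh selects the element size; the shift amount is
- immh:immb minus the element size in bits. */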
- if (INSTR (22, 22))
- {
- shift = INSTR (21, 16);
- if (full == 0)
- HALT_UNALLOC;
- for (i = 0; i < 2; i++)
- {
- uint64_t val = aarch64_get_vec_u64 (cpu, vs, i);
- aarch64_set_vec_u64 (cpu, vd, i, val << shift);
- }
- return;
- }
- if (INSTR (21, 21))
- {
- shift = INSTR (20, 16);
- for (i = 0; i < (full ? 4 : 2); i++)
- {
- uint32_t val = aarch64_get_vec_u32 (cpu, vs, i);
- aarch64_set_vec_u32 (cpu, vd, i, val << shift);
- }
- return;
- }
- if (INSTR (20, 20))
- {
- shift = INSTR (19, 16);
- for (i = 0; i < (full ? 8 : 4); i++)
- {
- uint16_t val = aarch64_get_vec_u16 (cpu, vs, i);
- aarch64_set_vec_u16 (cpu, vd, i, val << shift);
- }
- return;
- }
- if (INSTR (19, 19) == 0)
- HALT_UNALLOC;
- shift = INSTR (18, 16);
- for (i = 0; i < (full ? 16 : 8); i++)
- {
- uint8_t val = aarch64_get_vec_u8 (cpu, vs, i);
- aarch64_set_vec_u8 (cpu, vd, i, val << shift);
- }
- }
- static void
- do_vec_SSHR_USHR (sim_cpu *cpu)
- {
- /* instr [31] = 0
- instr [30] = half(0)/full(1)
- instr [29] = signed(0)/unsigned(1)
- instr [28,23] = 0 1111 0
- instr [22,16] = size and shift amount
- instr [15,10] = 0000 01
- instr [9, 5] = Vs
- instr [4, 0] = Vd. */
- int full = INSTR (30, 30);
- int sign = ! INSTR (29, 29);
- unsigned shift = INSTR (22, 16);
- unsigned vs = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- NYI_assert (28, 23, 0x1E);
- NYI_assert (15, 10, 0x01);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
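- /* Bits [22,16] hold immh:immb. The position of the most significant
- set bit of immh selects the element size; the right-shift amount is
- twice the element size in bits minus immh:immb. */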
- if (INSTR (22, 22))
- {
- shift = 128 - shift;
- if (full == 0)
- HALT_UNALLOC;
- if (sign)
- for (i = 0; i < 2; i++)
- {
- int64_t val = aarch64_get_vec_s64 (cpu, vs, i);
- aarch64_set_vec_s64 (cpu, vd, i, val >> shift);
- }
- else
- for (i = 0; i < 2; i++)
- {
- uint64_t val = aarch64_get_vec_u64 (cpu, vs, i);
- aarch64_set_vec_u64 (cpu, vd, i, val >> shift);
- }
- return;
- }
- if (INSTR (21, 21))
- {
- shift = 64 - shift;
- if (sign)
- for (i = 0; i < (full ? 4 : 2); i++)
- {
- int32_t val = aarch64_get_vec_s32 (cpu, vs, i);
- aarch64_set_vec_s32 (cpu, vd, i, val >> shift);
- }
- else
- for (i = 0; i < (full ? 4 : 2); i++)
- {
- uint32_t val = aarch64_get_vec_u32 (cpu, vs, i);
- aarch64_set_vec_u32 (cpu, vd, i, val >> shift);
- }
- return;
- }
- if (INSTR (20, 20))
- {
- shift = 32 - shift;
- if (sign)
- for (i = 0; i < (full ? 8 : 4); i++)
- {
- int16_t val = aarch64_get_vec_s16 (cpu, vs, i);
- aarch64_set_vec_s16 (cpu, vd, i, val >> shift);
- }
- else
- for (i = 0; i < (full ? 8 : 4); i++)
- {
- uint16_t val = aarch64_get_vec_u16 (cpu, vs, i);
- aarch64_set_vec_u16 (cpu, vd, i, val >> shift);
- }
- return;
- }
- if (INSTR (19, 19) == 0)
- HALT_UNALLOC;
- shift = 16 - shift;
- if (sign)
- for (i = 0; i < (full ? 16 : 8); i++)
- {
- int8_t val = aarch64_get_vec_s8 (cpu, vs, i);
- aarch64_set_vec_s8 (cpu, vd, i, val >> shift);
- }
- else
- for (i = 0; i < (full ? 16 : 8); i++)
- {
- uint8_t val = aarch64_get_vec_u8 (cpu, vs, i);
- aarch64_set_vec_u8 (cpu, vd, i, val >> shift);
- }
- }
- static void
- do_vec_MUL_by_element (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half/full
- instr[29,24] = 00 1111
- instr[23,22] = size
- instr[21] = L
- instr[20] = M
- instr[19,16] = m
- instr[15,12] = 1000
- instr[11] = H
- instr[10] = 0
- instr[9,5] = Vn
- instr[4,0] = Vd */
- unsigned full = INSTR (30, 30);
- unsigned L = INSTR (21, 21);
- unsigned H = INSTR (11, 11);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned size = INSTR (23, 22);
- unsigned index;
- unsigned vm;
- unsigned e;
- NYI_assert (29, 24, 0x0F);
- NYI_assert (15, 12, 0x8);
- NYI_assert (10, 10, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (size)
- {
- case 1:
- {
- /* 16 bit products. */
- uint16_t product;
- uint16_t element1;
- uint16_t element2;
- index = (H << 2) | (L << 1) | INSTR (20, 20);
- vm = INSTR (19, 16);
- element2 = aarch64_get_vec_u16 (cpu, vm, index);
- for (e = 0; e < (full ? 8 : 4); e ++)
- {
- element1 = aarch64_get_vec_u16 (cpu, vn, e);
- product = element1 * element2;
- aarch64_set_vec_u16 (cpu, vd, e, product);
- }
- }
- break;
- case 2:
- {
- /* 32 bit products. */
- uint32_t product;
- uint32_t element1;
- uint32_t element2;
- index = (H << 1) | L;
- vm = INSTR (20, 16);
- element2 = aarch64_get_vec_u32 (cpu, vm, index);
- for (e = 0; e < (full ? 4 : 2); e ++)
- {
- element1 = aarch64_get_vec_u32 (cpu, vn, e);
- product = element1 * element2;
- aarch64_set_vec_u32 (cpu, vd, e, product);
- }
- }
- break;
- default:
- HALT_UNALLOC;
- }
- }
- static void
- do_FMLA_by_element (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half/full
- instr[29,23] = 00 1111 1
- instr[22] = size
- instr[21] = L
- instr[20,16] = m
- instr[15,12] = 0001
- instr[11] = H
- instr[10] = 0
- instr[9,5] = Vn
- instr[4,0] = Vd */
- unsigned full = INSTR (30, 30);
- unsigned size = INSTR (22, 22);
- unsigned L = INSTR (21, 21);
- unsigned vm = INSTR (20, 16);
- unsigned H = INSTR (11, 11);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned e;
- NYI_assert (29, 23, 0x1F);
- NYI_assert (15, 12, 0x1);
- NYI_assert (10, 10, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (size)
- {
- double element1, element2;
- if (! full || L)
- HALT_UNALLOC;
- element2 = aarch64_get_vec_double (cpu, vm, H);
- for (e = 0; e < 2; e++)
- {
- element1 = aarch64_get_vec_double (cpu, vn, e);
- element1 *= element2;
- element1 += aarch64_get_vec_double (cpu, vd, e);
- aarch64_set_vec_double (cpu, vd, e, element1);
- }
- }
- else
- {
- float element1;
- float element2 = aarch64_get_vec_float (cpu, vm, (H << 1) | L);
- for (e = 0; e < (full ? 4 : 2); e++)
- {
- element1 = aarch64_get_vec_float (cpu, vn, e);
- element1 *= element2;
- element1 += aarch64_get_vec_float (cpu, vd, e);
- aarch64_set_vec_float (cpu, vd, e, element1);
- }
- }
- }
- static void
- do_vec_op2 (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half/full
- instr[29,24] = 00 1111
- instr[23] = ?
- instr[22,16] = element size & index
- instr[15,10] = sub-opcode
- instr[9,5] = Vn
- instr[4,0] = Vd */
- NYI_assert (29, 24, 0x0F);
- if (INSTR (23, 23) != 0)
- {
- switch (INSTR (15, 10))
- {
- case 0x04:
- case 0x06:
- do_FMLA_by_element (cpu);
- return;
- case 0x20:
- case 0x22:
- do_vec_MUL_by_element (cpu);
- return;
- default:
- HALT_NYI;
- }
- }
- else
- {
- switch (INSTR (15, 10))
- {
- case 0x01: do_vec_SSHR_USHR (cpu); return;
- case 0x15: do_vec_SHL (cpu); return;
- case 0x20:
- case 0x22: do_vec_MUL_by_element (cpu); return;
- case 0x29: do_vec_xtl (cpu); return;
- default: HALT_NYI;
- }
- }
- }
- static void
- do_vec_neg (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full(1)/half(0)
- instr[29,24] = 10 1110
- instr[23,22] = size: byte(00), half (01), word (10), long (11)
- instr[21,10] = 1000 0010 1110
- instr[9,5] = Vs
- instr[4,0] = Vd */
- int full = INSTR (30, 30);
- unsigned vs = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- NYI_assert (29, 24, 0x2E);
- NYI_assert (21, 10, 0x82E);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_s8 (cpu, vd, i, - aarch64_get_vec_s8 (cpu, vs, i));
- return;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_s16 (cpu, vd, i, - aarch64_get_vec_s16 (cpu, vs, i));
- return;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_s32 (cpu, vd, i, - aarch64_get_vec_s32 (cpu, vs, i));
- return;
- case 3:
- if (! full)
- HALT_NYI;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_s64 (cpu, vd, i, - aarch64_get_vec_s64 (cpu, vs, i));
- return;
- }
- }
- static void
- do_vec_sqrt (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full(1)/half(0)
- instr[29,23] = 101 1101
- instr[22] = single(0)/double(1)
- instr[21,10] = 1000 0111 1110
- instr[9,5] = Vs
- instr[4,0] = Vd. */
- int full = INSTR (30, 30);
- unsigned vs = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- NYI_assert (29, 23, 0x5B);
- NYI_assert (21, 10, 0x87E);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22) == 0)
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_float (cpu, vd, i,
- sqrtf (aarch64_get_vec_float (cpu, vs, i)));
- else
- for (i = 0; i < 2; i++)
- aarch64_set_vec_double (cpu, vd, i,
- sqrt (aarch64_get_vec_double (cpu, vs, i)));
- }
- static void
- do_vec_mls_indexed (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,24] = 10 1111
- instr[23,22] = 16-bit(01)/32-bit(10)
- instr[21,20],instr[11] = index (if 16-bit)
- instr[21],instr[11] = index (if 32-bit)
- instr[20,16] = Vm
- instr[15,12] = 0100
- instr[11] = part of index
- instr[10] = 0
- instr[9,5] = Vs
- instr[4,0] = Vd. */
- int full = INSTR (30, 30);
- unsigned vs = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned vm = INSTR (20, 16);
- unsigned i;
- NYI_assert (15, 12, 4);
- NYI_assert (10, 10, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 1:
- {
- unsigned elem;
- uint32_t val;
- if (vm > 15)
- HALT_NYI;
- elem = (INSTR (21, 20) << 1) | INSTR (11, 11);
- val = aarch64_get_vec_u16 (cpu, vm, elem);
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_u32 (cpu, vd, i,
- aarch64_get_vec_u32 (cpu, vd, i) -
- (aarch64_get_vec_u32 (cpu, vs, i) * val));
- return;
- }
- case 2:
- {
- unsigned elem = (INSTR (21, 21) << 1) | INSTR (11, 11);
- uint64_t val = aarch64_get_vec_u32 (cpu, vm, elem);
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u64 (cpu, vd, i,
- aarch64_get_vec_u64 (cpu, vd, i) -
- (aarch64_get_vec_u64 (cpu, vs, i) * val));
- return;
- }
- case 0:
- case 3:
- default:
- HALT_NYI;
- }
- }
- static void
- do_vec_SUB (sim_cpu *cpu)
- {
- /* instr [31] = 0
- instr [30] = half(0)/full(1)
- instr [29,24] = 10 1110
- instr [23,22] = size: byte(00), half(01), word (10), long (11)
- instr [21] = 1
- instr [20,16] = Vm
- instr [15,10] = 10 0001
- instr [9, 5] = Vn
- instr [4, 0] = Vd. */
- unsigned full = INSTR (30, 30);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- NYI_assert (29, 24, 0x2E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x21);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_s8 (cpu, vd, i,
- aarch64_get_vec_s8 (cpu, vn, i)
- - aarch64_get_vec_s8 (cpu, vm, i));
- return;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_s16 (cpu, vd, i,
- aarch64_get_vec_s16 (cpu, vn, i)
- - aarch64_get_vec_s16 (cpu, vm, i));
- return;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_s32 (cpu, vd, i,
- aarch64_get_vec_s32 (cpu, vn, i)
- - aarch64_get_vec_s32 (cpu, vm, i));
- return;
- case 3:
- if (full == 0)
- HALT_UNALLOC;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_s64 (cpu, vd, i,
- aarch64_get_vec_s64 (cpu, vn, i)
- - aarch64_get_vec_s64 (cpu, vm, i));
- return;
- }
- }
- static void
- do_vec_MLS (sim_cpu *cpu)
- {
- /* instr [31] = 0
- instr [30] = half(0)/full(1)
- instr [29,24] = 10 1110
- instr [23,22] = size: byte(00), half(01), word (10)
- instr [21] = 1
- instr [20,16] = Vm
- instr [15,10] = 10 0101
- instr [9, 5] = Vn
- instr [4, 0] = Vd. */
- unsigned full = INSTR (30, 30);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- NYI_assert (29, 24, 0x2E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x25);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i,
- aarch64_get_vec_u8 (cpu, vd, i)
- - (aarch64_get_vec_u8 (cpu, vn, i)
- * aarch64_get_vec_u8 (cpu, vm, i)));
- return;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_u16 (cpu, vd, i,
- aarch64_get_vec_u16 (cpu, vd, i)
- - (aarch64_get_vec_u16 (cpu, vn, i)
- * aarch64_get_vec_u16 (cpu, vm, i)));
- return;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i,
- aarch64_get_vec_u32 (cpu, vd, i)
- - (aarch64_get_vec_u32 (cpu, vn, i)
- * aarch64_get_vec_u32 (cpu, vm, i)));
- return;
- default:
- HALT_UNALLOC;
- }
- }
- static void
- do_vec_FDIV (sim_cpu *cpu)
- {
- /* instr [31] = 0
- instr [30] = half(0)/full(1)
- instr [29,23] = 10 1110 0
- instr [22] = float(0)/double(1)
- instr [21] = 1
- instr [20,16] = Vm
- instr [15,10] = 1111 11
- instr [9, 5] = Vn
- instr [4, 0] = Vd. */
- unsigned full = INSTR (30, 30);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- NYI_assert (29, 23, 0x5C);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x3F);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- if (! full)
- HALT_UNALLOC;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_double (cpu, vd, i,
- aarch64_get_vec_double (cpu, vn, i)
- / aarch64_get_vec_double (cpu, vm, i));
- }
- else
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_float (cpu, vd, i,
- aarch64_get_vec_float (cpu, vn, i)
- / aarch64_get_vec_float (cpu, vm, i));
- }
- static void
- do_vec_FMUL (sim_cpu *cpu)
- {
- /* instr [31] = 0
- instr [30] = half(0)/full(1)
- instr [29,23] = 10 1110 0
- instr [22] = float(0)/double(1)
- instr [21] = 1
- instr [20,16] = Vm
- instr [15,10] = 1101 11
- instr [9, 5] = Vn
- instr [4, 0] = Vd. */
- unsigned full = INSTR (30, 30);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- NYI_assert (29, 23, 0x5C);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x37);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- if (! full)
- HALT_UNALLOC;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_double (cpu, vd, i,
- aarch64_get_vec_double (cpu, vn, i)
- * aarch64_get_vec_double (cpu, vm, i));
- }
- else
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_float (cpu, vd, i,
- aarch64_get_vec_float (cpu, vn, i)
- * aarch64_get_vec_float (cpu, vm, i));
- }
- static void
- do_vec_FADDP (sim_cpu *cpu)
- {
- /* instr [31] = 0
- instr [30] = half(0)/full(1)
- instr [29,23] = 10 1110 0
- instr [22] = float(0)/double(1)
- instr [21] = 1
- instr [20,16] = Vm
- instr [15,10] = 1101 01
- instr [9, 5] = Vn
- instr [4, 0] = Vd. */
- unsigned full = INSTR (30, 30);
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- NYI_assert (29, 23, 0x5C);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x35);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- /* Extract values before adding them in case vd == vn/vm. */
- double tmp1 = aarch64_get_vec_double (cpu, vn, 0);
- double tmp2 = aarch64_get_vec_double (cpu, vn, 1);
- double tmp3 = aarch64_get_vec_double (cpu, vm, 0);
- double tmp4 = aarch64_get_vec_double (cpu, vm, 1);
- if (! full)
- HALT_UNALLOC;
- aarch64_set_vec_double (cpu, vd, 0, tmp1 + tmp2);
- aarch64_set_vec_double (cpu, vd, 1, tmp3 + tmp4);
- }
- else
- {
- /* Extract values before adding them in case vd == vn/vm. */
- float tmp1 = aarch64_get_vec_float (cpu, vn, 0);
- float tmp2 = aarch64_get_vec_float (cpu, vn, 1);
- float tmp5 = aarch64_get_vec_float (cpu, vm, 0);
- float tmp6 = aarch64_get_vec_float (cpu, vm, 1);
- if (full)
- {
- float tmp3 = aarch64_get_vec_float (cpu, vn, 2);
- float tmp4 = aarch64_get_vec_float (cpu, vn, 3);
- float tmp7 = aarch64_get_vec_float (cpu, vm, 2);
- float tmp8 = aarch64_get_vec_float (cpu, vm, 3);
- aarch64_set_vec_float (cpu, vd, 0, tmp1 + tmp2);
- aarch64_set_vec_float (cpu, vd, 1, tmp3 + tmp4);
- aarch64_set_vec_float (cpu, vd, 2, tmp5 + tmp6);
- aarch64_set_vec_float (cpu, vd, 3, tmp7 + tmp8);
- }
- else
- {
- aarch64_set_vec_float (cpu, vd, 0, tmp1 + tmp2);
- aarch64_set_vec_float (cpu, vd, 1, tmp5 + tmp6);
- }
- }
- }
- static void
- do_vec_FSQRT (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half(0)/full(1)
- instr[29,23] = 10 1110 1
- instr[22] = single(0)/double(1)
- instr[21,10] = 10 0001 1111 10
- instr[9,5] = Vsrc
- instr[4,0] = Vdest. */
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned full = INSTR (30, 30);
- int i;
- NYI_assert (29, 23, 0x5D);
- NYI_assert (21, 10, 0x87E);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- if (! full)
- HALT_UNALLOC;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_double (cpu, vd, i,
- sqrt (aarch64_get_vec_double (cpu, vn, i)));
- }
- else
- {
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_float (cpu, vd, i,
- sqrtf (aarch64_get_vec_float (cpu, vn, i)));
- }
- }
- static void
- do_vec_FNEG (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0)/full (1)
- instr[29,23] = 10 1110 1
- instr[22] = single (0)/double (1)
- instr[21,10] = 10 0000 1111 10
- instr[9,5] = Vsrc
- instr[4,0] = Vdest. */
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned full = INSTR (30, 30);
- int i;
- NYI_assert (29, 23, 0x5D);
- NYI_assert (21, 10, 0x83E);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- if (! full)
- HALT_UNALLOC;
- for (i = 0; i < 2; i++)
- aarch64_set_vec_double (cpu, vd, i,
- - aarch64_get_vec_double (cpu, vn, i));
- }
- else
- {
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_float (cpu, vd, i,
- - aarch64_get_vec_float (cpu, vn, i));
- }
- }
- static void
- do_vec_NOT (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0)/full (1)
- instr[29,10] = 10 1110 0010 0000 0101 10
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30, 30);
- NYI_assert (29, 10, 0xB8816);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i, ~ aarch64_get_vec_u8 (cpu, vn, i));
- }
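- /* Return the number of leading zero bits in the SIZE-bit value VAL,
- or SIZE if VAL is zero. */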
- static unsigned int
- clz (uint64_t val, unsigned size)
- {
- uint64_t mask = 1;
- int count;
- mask <<= (size - 1);
- count = 0;
- do
- {
- if (val & mask)
- break;
- mask >>= 1;
- count ++;
- }
- while (mask);
- return count;
- }
- static void
- do_vec_CLZ (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = half (0)/full (1)
- instr[29,24] = 10 1110
- instr[23,22] = size
- instr[21,10] = 10 0000 0100 10
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned i;
- int full = INSTR (30,30);
- NYI_assert (29, 24, 0x2E);
- NYI_assert (21, 10, 0x812);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (23, 22))
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd, i, clz (aarch64_get_vec_u8 (cpu, vn, i), 8));
- break;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_u16 (cpu, vd, i, clz (aarch64_get_vec_u16 (cpu, vn, i), 16));
- break;
- case 2:
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd, i, clz (aarch64_get_vec_u32 (cpu, vn, i), 32));
- break;
- case 3:
- if (! full)
- HALT_UNALLOC;
- aarch64_set_vec_u64 (cpu, vd, 0, clz (aarch64_get_vec_u64 (cpu, vn, 0), 64));
- aarch64_set_vec_u64 (cpu, vd, 1, clz (aarch64_get_vec_u64 (cpu, vn, 1), 64));
- break;
- }
- }
- static void
- do_vec_MOV_element (sim_cpu *cpu)
- {
- /* instr[31,21] = 0110 1110 000
- instr[20,16] = size & dest index
- instr[15] = 0
- instr[14,11] = source index
- instr[10] = 1
- instr[9,5] = Vs
- instr[4,0] = Vd. */
- unsigned vs = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned src_index;
- unsigned dst_index;
- NYI_assert (31, 21, 0x370);
- NYI_assert (15, 15, 0);
- NYI_assert (10, 10, 1);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
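- /* The lowest set bit of instr[20,16] selects the element size;
- the bits above it hold the destination index. */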
- if (INSTR (16, 16))
- {
- /* Move a byte. */
- src_index = INSTR (14, 11);
- dst_index = INSTR (20, 17);
- aarch64_set_vec_u8 (cpu, vd, dst_index,
- aarch64_get_vec_u8 (cpu, vs, src_index));
- }
- else if (INSTR (17, 17))
- {
- /* Move 16-bits. */
- NYI_assert (11, 11, 0);
- src_index = INSTR (14, 12);
- dst_index = INSTR (20, 18);
- aarch64_set_vec_u16 (cpu, vd, dst_index,
- aarch64_get_vec_u16 (cpu, vs, src_index));
- }
- else if (INSTR (18, 18))
- {
- /* Move 32-bits. */
- NYI_assert (12, 11, 0);
- src_index = INSTR (14, 13);
- dst_index = INSTR (20, 19);
- aarch64_set_vec_u32 (cpu, vd, dst_index,
- aarch64_get_vec_u32 (cpu, vs, src_index));
- }
- else
- {
- NYI_assert (19, 19, 1);
- NYI_assert (13, 11, 0);
- src_index = INSTR (14, 14);
- dst_index = INSTR (20, 20);
- aarch64_set_vec_u64 (cpu, vd, dst_index,
- aarch64_get_vec_u64 (cpu, vs, src_index));
- }
- }
- static void
- do_vec_REV32 (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half
- instr[29,24] = 10 1110
- instr[23,22] = size
- instr[21,10] = 10 0000 0000 10
- instr[9,5] = Rn
- instr[4,0] = Rd. */
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned size = INSTR (23, 22);
- unsigned full = INSTR (30, 30);
- unsigned i;
- FRegister val;
- NYI_assert (29, 24, 0x2E);
- NYI_assert (21, 10, 0x802);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (size)
- {
- case 0:
- for (i = 0; i < (full ? 16 : 8); i++)
- val.b[i ^ 0x3] = aarch64_get_vec_u8 (cpu, rn, i);
- break;
- case 1:
- for (i = 0; i < (full ? 8 : 4); i++)
- val.h[i ^ 0x1] = aarch64_get_vec_u16 (cpu, rn, i);
- break;
- default:
- HALT_UNALLOC;
- }
- aarch64_set_vec_u64 (cpu, rd, 0, val.v[0]);
- if (full)
- aarch64_set_vec_u64 (cpu, rd, 1, val.v[1]);
- }
- static void
- do_vec_EXT (sim_cpu *cpu)
- {
- /* instr[31] = 0
- instr[30] = full/half
- instr[29,21] = 10 1110 000
- instr[20,16] = Vm
- instr[15] = 0
- instr[14,11] = source index
- instr[10] = 0
- instr[9,5] = Vn
- instr[4,0] = Vd. */
- unsigned vm = INSTR (20, 16);
- unsigned vn = INSTR (9, 5);
- unsigned vd = INSTR (4, 0);
- unsigned src_index = INSTR (14, 11);
- unsigned full = INSTR (30, 30);
- unsigned i;
- unsigned j;
- FRegister val;
- NYI_assert (31, 21, 0x370);
- NYI_assert (15, 15, 0);
- NYI_assert (10, 10, 0);
- if (!full && (src_index & 0x8))
- HALT_UNALLOC;
- j = 0;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- for (i = src_index; i < (full ? 16 : 8); i++)
- val.b[j ++] = aarch64_get_vec_u8 (cpu, vn, i);
- for (i = 0; i < src_index; i++)
- val.b[j ++] = aarch64_get_vec_u8 (cpu, vm, i);
- aarch64_set_vec_u64 (cpu, vd, 0, val.v[0]);
- if (full)
- aarch64_set_vec_u64 (cpu, vd, 1, val.v[1]);
- }
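- /* Worked example (illustrative): with full == 1 and src_index == 3
- the result is bytes Vn[3..15] followed by Vm[0..2], i.e. the
- concatenated Vm:Vn pair shifted right by three bytes. */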
- static void
- dexAdvSIMD0 (sim_cpu *cpu)
- {
- /* instr [28,25] = 0 111. */
- if (INSTR (15, 10) == 0x07
- && INSTR (9, 5) == INSTR (20, 16))
- {
- if (INSTR (31, 21) == 0x075
- || INSTR (31, 21) == 0x275)
- {
- do_vec_MOV_whole_vector (cpu);
- return;
- }
- }
- if (INSTR (29, 19) == 0x1E0)
- {
- do_vec_MOV_immediate (cpu);
- return;
- }
- if (INSTR (29, 19) == 0x5E0)
- {
- do_vec_MVNI (cpu);
- return;
- }
- if (INSTR (29, 19) == 0x1C0
- || INSTR (29, 19) == 0x1C1)
- {
- if (INSTR (15, 10) == 0x03)
- {
- do_vec_DUP_scalar_into_vector (cpu);
- return;
- }
- }
- switch (INSTR (29, 24))
- {
- case 0x0E: do_vec_op1 (cpu); return;
- case 0x0F: do_vec_op2 (cpu); return;
- case 0x2E:
- if (INSTR (21, 21) == 1)
- {
- switch (INSTR (15, 10))
- {
- case 0x02:
- do_vec_REV32 (cpu);
- return;
- case 0x07:
- switch (INSTR (23, 22))
- {
- case 0: do_vec_EOR (cpu); return;
- case 1: do_vec_BSL (cpu); return;
- case 2:
- case 3: do_vec_bit (cpu); return;
- }
- break;
- case 0x08: do_vec_sub_long (cpu); return;
- case 0x11: do_vec_USHL (cpu); return;
- case 0x12: do_vec_CLZ (cpu); return;
- case 0x16: do_vec_NOT (cpu); return;
- case 0x19: do_vec_max (cpu); return;
- case 0x1B: do_vec_min (cpu); return;
- case 0x21: do_vec_SUB (cpu); return;
- case 0x25: do_vec_MLS (cpu); return;
- case 0x31: do_vec_FminmaxNMP (cpu); return;
- case 0x35: do_vec_FADDP (cpu); return;
- case 0x37: do_vec_FMUL (cpu); return;
- case 0x3F: do_vec_FDIV (cpu); return;
- case 0x3E:
- switch (INSTR (20, 16))
- {
- case 0x00: do_vec_FNEG (cpu); return;
- case 0x01: do_vec_FSQRT (cpu); return;
- default: HALT_NYI;
- }
- case 0x0D:
- case 0x0F:
- case 0x22:
- case 0x23:
- case 0x26:
- case 0x2A:
- case 0x32:
- case 0x36:
- case 0x39:
- case 0x3A:
- do_vec_compare (cpu); return;
- default:
- break;
- }
- }
- if (INSTR (31, 21) == 0x370)
- {
- if (INSTR (10, 10))
- do_vec_MOV_element (cpu);
- else
- do_vec_EXT (cpu);
- return;
- }
- switch (INSTR (21, 10))
- {
- case 0x82E: do_vec_neg (cpu); return;
- case 0x87E: do_vec_sqrt (cpu); return;
- default:
- if (INSTR (15, 10) == 0x30)
- {
- do_vec_mull (cpu);
- return;
- }
- break;
- }
- break;
- case 0x2f:
- switch (INSTR (15, 10))
- {
- case 0x01: do_vec_SSHR_USHR (cpu); return;
- case 0x10:
- case 0x12: do_vec_mls_indexed (cpu); return;
- case 0x29: do_vec_xtl (cpu); return;
- default:
- HALT_NYI;
- }
- default:
- break;
- }
- HALT_NYI;
- }
- /* 3 sources. */
- /* Float multiply add. */
- static void
- fmadds (sim_cpu *cpu)
- {
- unsigned sa = INSTR (14, 10);
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sa)
- + aarch64_get_FP_float (cpu, sn)
- * aarch64_get_FP_float (cpu, sm));
- }
- /* Double multiply add. */
- static void
- fmaddd (sim_cpu *cpu)
- {
- unsigned sa = INSTR (14, 10);
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sa)
- + aarch64_get_FP_double (cpu, sn)
- * aarch64_get_FP_double (cpu, sm));
- }
- /* Float multiply subtract. */
- static void
- fmsubs (sim_cpu *cpu)
- {
- unsigned sa = INSTR (14, 10);
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sa)
- - aarch64_get_FP_float (cpu, sn)
- * aarch64_get_FP_float (cpu, sm));
- }
- /* Double multiply subtract. */
- static void
- fmsubd (sim_cpu *cpu)
- {
- unsigned sa = INSTR (14, 10);
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sa)
- - aarch64_get_FP_double (cpu, sn)
- * aarch64_get_FP_double (cpu, sm));
- }
- /* Float negative multiply add. */
- static void
- fnmadds (sim_cpu *cpu)
- {
- unsigned sa = INSTR (14, 10);
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, - aarch64_get_FP_float (cpu, sa)
- + (- aarch64_get_FP_float (cpu, sn))
- * aarch64_get_FP_float (cpu, sm));
- }
- /* Double negative multiply add. */
- static void
- fnmaddd (sim_cpu *cpu)
- {
- unsigned sa = INSTR (14, 10);
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, - aarch64_get_FP_double (cpu, sa)
- + (- aarch64_get_FP_double (cpu, sn))
- * aarch64_get_FP_double (cpu, sm));
- }
- /* Float negative multiply subtract. */
- static void
- fnmsubs (sim_cpu *cpu)
- {
- unsigned sa = INSTR (14, 10);
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, - aarch64_get_FP_float (cpu, sa)
- + aarch64_get_FP_float (cpu, sn)
- * aarch64_get_FP_float (cpu, sm));
- }
- /* Double negative multiply subtract. */
- static void
- fnmsubd (sim_cpu *cpu)
- {
- unsigned sa = INSTR (14, 10);
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, - aarch64_get_FP_double (cpu, sa)
- + aarch64_get_FP_double (cpu, sn)
- * aarch64_get_FP_double (cpu, sm));
- }
- static void
- dexSimpleFPDataProc3Source (sim_cpu *cpu)
- {
- /* instr[31] ==> M : 0 ==> OK, 1 ==> UNALLOC
- instr[30] = 0
- instr[29] ==> S : 0 ==> OK, 1 ==> UNALLOC
- instr[28,25] = 1111
- instr[24] = 1
- instr[23,22] ==> type : 00 ==> single, 01 ==> double, 1x ==> UNALLOC
- instr[21] ==> o1 : 0 ==> unnegated, 1 ==> negated
- instr[15] ==> o2 : 0 ==> ADD, 1 ==> SUB */
- uint32_t M_S = (INSTR (31, 31) << 1) | INSTR (29, 29);
- /* dispatch on combined type:o1:o2. */
- uint32_t dispatch = (INSTR (23, 21) << 1) | INSTR (15, 15);
- if (M_S != 0)
- HALT_UNALLOC;
- switch (dispatch)
- {
- case 0: fmadds (cpu); return;
- case 1: fmsubs (cpu); return;
- case 2: fnmadds (cpu); return;
- case 3: fnmsubs (cpu); return;
- case 4: fmaddd (cpu); return;
- case 5: fmsubd (cpu); return;
- case 6: fnmaddd (cpu); return;
- case 7: fnmsubd (cpu); return;
- default:
- /* type > 1 is currently unallocated. */
- HALT_UNALLOC;
- }
- }
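- /* Dispatch example (illustrative): FMSUB Dd, Dn, Dm, Da has type
- == 01, o1 == 0 and o2 == 1, so dispatch = (0b010 << 1) | 1 = 5
- and fmsubd is called. */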
- static void
- dexSimpleFPFixedConvert (sim_cpu *cpu)
- {
- HALT_NYI;
- }
- static void
- dexSimpleFPCondCompare (sim_cpu *cpu)
- {
- /* instr [31,23] = 0001 1110 0
- instr [22] = type
- instr [21] = 1
- instr [20,16] = Rm
- instr [15,12] = condition
- instr [11,10] = 01
- instr [9,5] = Rn
- instr [4] = 0
- instr [3,0] = nzcv */
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- NYI_assert (31, 23, 0x3C);
- NYI_assert (11, 10, 0x1);
- NYI_assert (4, 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (! testConditionCode (cpu, INSTR (15, 12)))
- {
- aarch64_set_CPSR (cpu, INSTR (3, 0));
- return;
- }
- if (INSTR (22, 22))
- {
- /* Double precision. */
- double val1 = aarch64_get_vec_double (cpu, rn, 0);
- double val2 = aarch64_get_vec_double (cpu, rm, 0);
- /* FIXME: Check for NaNs. */
- if (val1 == val2)
- aarch64_set_CPSR (cpu, (Z | C));
- else if (val1 < val2)
- aarch64_set_CPSR (cpu, N);
- else /* val1 > val2 */
- aarch64_set_CPSR (cpu, C);
- }
- else
- {
- /* Single precision. */
- float val1 = aarch64_get_vec_float (cpu, rn, 0);
- float val2 = aarch64_get_vec_float (cpu, rm, 0);
- /* FIXME: Check for NaNs. */
- if (val1 == val2)
- aarch64_set_CPSR (cpu, (Z | C));
- else if (val1 < val2)
- aarch64_set_CPSR (cpu, N);
- else /* val1 > val2 */
- aarch64_set_CPSR (cpu, C);
- }
- }
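- /* Illustrative: for FCCMP D0, D1, #0b0100, EQ -- if the EQ test
- fails, NZCV is set directly from the immediate (here Z alone);
- only when the test passes are D0 and D1 actually compared. */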
- /* 2 sources. */
- /* Float add. */
- static void
- fadds (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn)
- + aarch64_get_FP_float (cpu, sm));
- }
- /* Double add. */
- static void
- faddd (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn)
- + aarch64_get_FP_double (cpu, sm));
- }
- /* Float divide. */
- static void
- fdivs (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn)
- / aarch64_get_FP_float (cpu, sm));
- }
- /* Double divide. */
- static void
- fdivd (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn)
- / aarch64_get_FP_double (cpu, sm));
- }
- /* Float multiply. */
- static void
- fmuls (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn)
- * aarch64_get_FP_float (cpu, sm));
- }
- /* Double multiply. */
- static void
- fmuld (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn)
- * aarch64_get_FP_double (cpu, sm));
- }
- /* Float negate and multiply. */
- static void
- fnmuls (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, - (aarch64_get_FP_float (cpu, sn)
- * aarch64_get_FP_float (cpu, sm)));
- }
- /* Double negate and multiply. */
- static void
- fnmuld (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, - (aarch64_get_FP_double (cpu, sn)
- * aarch64_get_FP_double (cpu, sm)));
- }
- /* Float subtract. */
- static void
- fsubs (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn)
- - aarch64_get_FP_float (cpu, sm));
- }
- /* Double subtract. */
- static void
- fsubd (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn)
- - aarch64_get_FP_double (cpu, sm));
- }
- static void
- do_FMINNM (sim_cpu *cpu)
- {
- /* instr[31,23] = 0 0011 1100
- instr[22] = float(0)/double(1)
- instr[21] = 1
- instr[20,16] = Sm
- instr[15,10] = 01 1110
- instr[9,5] = Sn
- instr[4,0] = Sd */
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- NYI_assert (31, 23, 0x03C);
- NYI_assert (15, 10, 0x1E);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- aarch64_set_FP_double (cpu, sd,
- dminnm (aarch64_get_FP_double (cpu, sn),
- aarch64_get_FP_double (cpu, sm)));
- else
- aarch64_set_FP_float (cpu, sd,
- fminnm (aarch64_get_FP_float (cpu, sn),
- aarch64_get_FP_float (cpu, sm)));
- }
- static void
- do_FMAXNM (sim_cpu *cpu)
- {
- /* instr[31,23] = 0 0011 1100
- instr[22] = float(0)/double(1)
- instr[21] = 1
- instr[20,16] = Sm
- instr[15,10] = 01 1010
- instr[9,5] = Sn
- instr[4,0] = Sd */
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- NYI_assert (31, 23, 0x03C);
- NYI_assert (15, 10, 0x1A);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- aarch64_set_FP_double (cpu, sd,
- dmaxnm (aarch64_get_FP_double (cpu, sn),
- aarch64_get_FP_double (cpu, sm)));
- else
- aarch64_set_FP_float (cpu, sd,
- fmaxnm (aarch64_get_FP_float (cpu, sn),
- aarch64_get_FP_float (cpu, sm)));
- }
- static void
- dexSimpleFPDataProc2Source (sim_cpu *cpu)
- {
- /* instr[31] ==> M : 0 ==> OK, 1 ==> UNALLOC
- instr[30] = 0
- instr[29] ==> S : 0 ==> OK, 1 ==> UNALLOC
- instr[28,25] = 1111
- instr[24] = 0
- instr[23,22] ==> type : 00 ==> single, 01 ==> double, 1x ==> UNALLOC
- instr[21] = 1
- instr[20,16] = Vm
- instr[15,12] ==> opcode : 0000 ==> FMUL, 0001 ==> FDIV
- 0010 ==> FADD, 0011 ==> FSUB,
- 0100 ==> FMAX, 0101 ==> FMIN
- 0110 ==> FMAXNM, 0111 ==> FMINNM
- 1000 ==> FNMUL, ow ==> UNALLOC
- instr[11,10] = 10
- instr[9,5] = Vn
- instr[4,0] = Vd */
- uint32_t M_S = (INSTR (31, 31) << 1) | INSTR (29, 29);
- uint32_t type = INSTR (23, 22);
- /* Dispatch on opcode. */
- uint32_t dispatch = INSTR (15, 12);
- if (type > 1)
- HALT_UNALLOC;
- if (M_S != 0)
- HALT_UNALLOC;
- if (type)
- switch (dispatch)
- {
- case 0: fmuld (cpu); return;
- case 1: fdivd (cpu); return;
- case 2: faddd (cpu); return;
- case 3: fsubd (cpu); return;
- case 6: do_FMAXNM (cpu); return;
- case 7: do_FMINNM (cpu); return;
- case 8: fnmuld (cpu); return;
- /* Have not yet implemented fmax and fmin. */
- case 4:
- case 5:
- HALT_NYI;
- default:
- HALT_UNALLOC;
- }
- else /* type == 0 => floats. */
- switch (dispatch)
- {
- case 0: fmuls (cpu); return;
- case 1: fdivs (cpu); return;
- case 2: fadds (cpu); return;
- case 3: fsubs (cpu); return;
- case 6: do_FMAXNM (cpu); return;
- case 7: do_FMINNM (cpu); return;
- case 8: fnmuls (cpu); return;
- case 4:
- case 5:
- HALT_NYI;
- default:
- HALT_UNALLOC;
- }
- }
- static void
- dexSimpleFPCondSelect (sim_cpu *cpu)
- {
- /* FCSEL
- instr[31,23] = 0 0011 1100
- instr[22] = 0=>single 1=>double
- instr[21] = 1
- instr[20,16] = Sm
- instr[15,12] = cond
- instr[11,10] = 11
- instr[9,5] = Sn
- instr[4,0] = Sd */
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- unsigned sd = INSTR ( 4, 0);
- uint32_t set = testConditionCode (cpu, INSTR (15, 12));
- NYI_assert (31, 23, 0x03C);
- NYI_assert (11, 10, 0x3);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- aarch64_set_FP_double (cpu, sd, (set ? aarch64_get_FP_double (cpu, sn)
- : aarch64_get_FP_double (cpu, sm)));
- else
- aarch64_set_FP_float (cpu, sd, (set ? aarch64_get_FP_float (cpu, sn)
- : aarch64_get_FP_float (cpu, sm)));
- }
- /* Store 32 bit unscaled signed 9 bit. */
- static void
- fsturs (sim_cpu *cpu, int32_t offset)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int st = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u32 (cpu, aarch64_get_reg_u64 (cpu, rn, 1) + offset,
- aarch64_get_vec_u32 (cpu, st, 0));
- }
- /* Store 64 bit unscaled signed 9 bit. */
- static void
- fsturd (sim_cpu *cpu, int32_t offset)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int st = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_mem_u64 (cpu, aarch64_get_reg_u64 (cpu, rn, 1) + offset,
- aarch64_get_vec_u64 (cpu, st, 0));
- }
- /* Store 128 bit unscaled signed 9 bit. */
- static void
- fsturq (sim_cpu *cpu, int32_t offset)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int st = INSTR (4, 0);
- FRegister a;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_get_FP_long_double (cpu, st, & a);
- aarch64_set_mem_long_double (cpu,
- aarch64_get_reg_u64 (cpu, rn, 1)
- + offset, a);
- }
- /* TODO FP move register. */
- /* 32 bit fp to fp move register. */
- static void
- ffmovs (sim_cpu *cpu)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int st = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, st, aarch64_get_FP_float (cpu, rn));
- }
- /* 64 bit fp to fp move register. */
- static void
- ffmovd (sim_cpu *cpu)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int st = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, st, aarch64_get_FP_double (cpu, rn));
- }
- /* 32 bit GReg to Vec move register. */
- static void
- fgmovs (sim_cpu *cpu)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int st = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u32 (cpu, st, 0, aarch64_get_reg_u32 (cpu, rn, NO_SP));
- }
- /* 64 bit g to fp move register. */
- static void
- fgmovd (sim_cpu *cpu)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int st = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u64 (cpu, st, 0, aarch64_get_reg_u64 (cpu, rn, NO_SP));
- }
- /* 32 bit fp to g move register. */
- static void
- gfmovs (sim_cpu *cpu)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int st = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, st, NO_SP, aarch64_get_vec_u32 (cpu, rn, 0));
- }
- /* 64 bit fp to g move register. */
- static void
- gfmovd (sim_cpu *cpu)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int st = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, st, NO_SP, aarch64_get_vec_u64 (cpu, rn, 0));
- }
- /* FP move immediate
- These install an immediate 8 bit value in the target register
- where the 8 bits comprise 1 sign bit, 4 bits of fraction and a 3
- bit exponent. */
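- /* Worked example (assuming the usual AdvSIMD FP8 expansion, not
- shown here): imm8 = 0x70 = 0b0111 0000 expands to sign 0,
- exponent 0111 1111 and fraction 0000..., i.e. the single
- precision bit pattern 0x3F800000 (1.0f), while imm8 = 0xF0
- likewise yields -1.0f. */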
- static void
- fmovs (sim_cpu *cpu)
- {
- unsigned int sd = INSTR (4, 0);
- uint32_t imm = INSTR (20, 13);
- float f = fp_immediate_for_encoding_32 (imm);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, f);
- }
- static void
- fmovd (sim_cpu *cpu)
- {
- unsigned int sd = INSTR (4, 0);
- uint32_t imm = INSTR (20, 13);
- double d = fp_immediate_for_encoding_64 (imm);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, d);
- }
- static void
- dexSimpleFPImmediate (sim_cpu *cpu)
- {
- /* instr[31,23] == 00111100
- instr[22] == type : single(0)/double(1)
- instr[21] == 1
- instr[20,13] == imm8
- instr[12,10] == 100
- instr[9,5] == imm5 : 00000 ==> OK, ow ==> UNALLOC
- instr[4,0] == Rd */
- uint32_t imm5 = INSTR (9, 5);
- NYI_assert (31, 23, 0x3C);
- if (imm5 != 0)
- HALT_UNALLOC;
- if (INSTR (22, 22))
- fmovd (cpu);
- else
- fmovs (cpu);
- }
- /* TODO specific decode and execute for group Load Store. */
- /* TODO FP load/store single register (unscaled offset). */
- /* TODO load 8 bit unscaled signed 9 bit. */
- /* TODO load 16 bit unscaled signed 9 bit. */
- /* Load 32 bit unscaled signed 9 bit. */
- static void
- fldurs (sim_cpu *cpu, int32_t offset)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int st = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u32 (cpu, st, 0, aarch64_get_mem_u32
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset));
- }
- /* Load 64 bit unscaled signed 9 bit. */
- static void
- fldurd (sim_cpu *cpu, int32_t offset)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int st = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u64 (cpu, st, 0, aarch64_get_mem_u64
- (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset));
- }
- /* Load 128 bit unscaled signed 9 bit. */
- static void
- fldurq (sim_cpu *cpu, int32_t offset)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int st = INSTR (4, 0);
- FRegister a;
- uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_get_mem_long_double (cpu, addr, & a);
- aarch64_set_FP_long_double (cpu, st, a);
- }
- /* TODO store 8 bit unscaled signed 9 bit. */
- /* TODO store 16 bit unscaled signed 9 bit. */
- /* 1 source. */
- /* Float absolute value. */
- static void
- fabss (sim_cpu *cpu)
- {
- unsigned sn = INSTR (9, 5);
- unsigned sd = INSTR (4, 0);
- float value = aarch64_get_FP_float (cpu, sn);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, fabsf (value));
- }
- /* Double absolute value. */
- static void
- fabcpu (sim_cpu *cpu)
- {
- unsigned sn = INSTR (9, 5);
- unsigned sd = INSTR (4, 0);
- double value = aarch64_get_FP_double (cpu, sn);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, fabs (value));
- }
- /* Float negative value. */
- static void
- fnegs (sim_cpu *cpu)
- {
- unsigned sn = INSTR (9, 5);
- unsigned sd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, - aarch64_get_FP_float (cpu, sn));
- }
- /* Double negative value. */
- static void
- fnegd (sim_cpu *cpu)
- {
- unsigned sn = INSTR (9, 5);
- unsigned sd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, - aarch64_get_FP_double (cpu, sn));
- }
- /* Float square root. */
- static void
- fsqrts (sim_cpu *cpu)
- {
- unsigned sn = INSTR (9, 5);
- unsigned sd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, sqrtf (aarch64_get_FP_float (cpu, sn)));
- }
- /* Double square root. */
- static void
- fsqrtd (sim_cpu *cpu)
- {
- unsigned sn = INSTR (9, 5);
- unsigned sd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd,
- sqrt (aarch64_get_FP_double (cpu, sn)));
- }
- /* Convert double to float. */
- static void
- fcvtds (sim_cpu *cpu)
- {
- unsigned sn = INSTR (9, 5);
- unsigned sd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, sd, (float) aarch64_get_FP_double (cpu, sn));
- }
- /* Convert float to double. */
- static void
- fcvtcpu (sim_cpu *cpu)
- {
- unsigned sn = INSTR (9, 5);
- unsigned sd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, sd, (double) aarch64_get_FP_float (cpu, sn));
- }
- static void
- do_FRINT (sim_cpu *cpu)
- {
- /* instr[31,23] = 0001 1110 0
- instr[22] = single(0)/double(1)
- instr[21,18] = 1001
- instr[17,15] = rounding mode
- instr[14,10] = 10000
- instr[9,5] = source
- instr[4,0] = dest */
- float val;
- unsigned rs = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned int rmode = INSTR (17, 15);
- NYI_assert (31, 23, 0x03C);
- NYI_assert (21, 18, 0x9);
- NYI_assert (14, 10, 0x10);
- if (rmode == 6 || rmode == 7)
- /* FIXME: Add support for the rmode == 6 exactness check.
- FIXME: The dynamic rounding mode architecturally lives in
- FPCR[23,22]; this reads the FPSR instead. */
- rmode = uimm (aarch64_get_FPSR (cpu), 23, 22);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- double val = aarch64_get_FP_double (cpu, rs);
- switch (rmode)
- {
- case 0: /* mode N: nearest or even. */
- {
- double rval = round (val);
- /* round () resolves ties away from zero; pull halfway
- cases back toward zero when that yields an even result. */
- if (fabs (val - rval) == 0.5 && fmod (rval, 2.0) != 0.0)
- rval -= copysign (1.0, val);
- aarch64_set_FP_double (cpu, rd, rval);
- return;
- }
- case 1: /* mode P: towards +inf. */
- aarch64_set_FP_double (cpu, rd, ceil (val));
- return;
- case 2: /* mode M: towards -inf. */
- aarch64_set_FP_double (cpu, rd, floor (val));
- return;
- case 3: /* mode Z: towards 0. */
- aarch64_set_FP_double (cpu, rd, trunc (val));
- return;
- case 4: /* mode A: away from 0. */
- aarch64_set_FP_double (cpu, rd, round (val));
- return;
- case 6: /* mode X: use FPCR with exactness check. */
- case 7: /* mode I: use FPCR mode. */
- HALT_NYI;
- default:
- HALT_UNALLOC;
- }
- }
- val = aarch64_get_FP_float (cpu, rs);
- switch (rmode)
- {
- case 0: /* mode N: nearest or even. */
- {
- float rval = roundf (val);
- /* roundf () resolves ties away from zero; pull halfway
- cases back toward zero when that yields an even result. */
- if (fabsf (val - rval) == 0.5f && fmodf (rval, 2.0f) != 0.0f)
- rval -= copysignf (1.0f, val);
- aarch64_set_FP_float (cpu, rd, rval);
- return;
- }
- case 1: /* mode P: towards +inf. */
- aarch64_set_FP_float (cpu, rd, ceilf (val));
- return;
- case 2: /* mode M: towards -inf. */
- aarch64_set_FP_float (cpu, rd, floorf (val));
- return;
- case 3: /* mode Z: towards 0. */
- aarch64_set_FP_float (cpu, rd, truncf (val));
- return;
- case 4: /* mode A: away from 0. */
- aarch64_set_FP_float (cpu, rd, roundf (val));
- return;
- case 6: /* mode X: use FPCR with exactness check. */
- case 7: /* mode I: use FPCR mode. */
- HALT_NYI;
- default:
- HALT_UNALLOC;
- }
- }
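- /* Worked examples for the rounding modes above (illustrative):
- FRINTN (2.5) = 2.0 (ties to even), FRINTA (2.5) = 3.0 (ties away
- from zero), FRINTP (2.3) = 3.0, FRINTM (-0.5) = -1.0 and
- FRINTZ (-2.7) = -2.0. */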
- /* Convert half to float. */
- static void
- do_FCVT_half_to_single (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 10, 0x7B890);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float (cpu, rd, (float) aarch64_get_FP_half (cpu, rn));
- }
- /* Convert half to double. */
- static void
- do_FCVT_half_to_double (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 10, 0x7B8B0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double (cpu, rd, (double) aarch64_get_FP_half (cpu, rn));
- }
- static void
- do_FCVT_single_to_half (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 10, 0x788F0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_half (cpu, rd, aarch64_get_FP_float (cpu, rn));
- }
- /* Convert double to half. */
- static void
- do_FCVT_double_to_half (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 10, 0x798F0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_half (cpu, rd, (float) aarch64_get_FP_double (cpu, rn));
- }
- static void
- dexSimpleFPDataProc1Source (sim_cpu *cpu)
- {
- /* instr[31] ==> M : 0 ==> OK, 1 ==> UNALLOC
- instr[30] = 0
- instr[29] ==> S : 0 ==> OK, 1 ==> UNALLOC
- instr[28,25] = 1111
- instr[24] = 0
- instr[23,22] ==> type : 00 ==> source is single,
- 01 ==> source is double
- 10 ==> UNALLOC
- 11 ==> UNALLOC or source is half
- instr[21] = 1
- instr[20,15] ==> opcode : with type 00 or 01
- 000000 ==> FMOV, 000001 ==> FABS,
- 000010 ==> FNEG, 000011 ==> FSQRT,
- 000100 ==> UNALLOC, 000101 ==> FCVT,(to single/double)
- 000110 ==> UNALLOC, 000111 ==> FCVT (to half)
- 001000 ==> FRINTN, 001001 ==> FRINTP,
- 001010 ==> FRINTM, 001011 ==> FRINTZ,
- 001100 ==> FRINTA, 001101 ==> UNALLOC
- 001110 ==> FRINTX, 001111 ==> FRINTI
- with type 11
- 000100 ==> FCVT (half-to-single)
- 000101 ==> FCVT (half-to-double)
- instr[14,10] = 10000. */
- uint32_t M_S = (INSTR (31, 31) << 1) | INSTR (29, 29);
- uint32_t type = INSTR (23, 22);
- uint32_t opcode = INSTR (20, 15);
- if (M_S != 0)
- HALT_UNALLOC;
- if (type == 3)
- {
- if (opcode == 4)
- do_FCVT_half_to_single (cpu);
- else if (opcode == 5)
- do_FCVT_half_to_double (cpu);
- else
- HALT_UNALLOC;
- return;
- }
- if (type == 2)
- HALT_UNALLOC;
- switch (opcode)
- {
- case 0:
- if (type)
- ffmovd (cpu);
- else
- ffmovs (cpu);
- return;
- case 1:
- if (type)
- fabcpu (cpu);
- else
- fabss (cpu);
- return;
- case 2:
- if (type)
- fnegd (cpu);
- else
- fnegs (cpu);
- return;
- case 3:
- if (type)
- fsqrtd (cpu);
- else
- fsqrts (cpu);
- return;
- case 4:
- if (type)
- fcvtds (cpu);
- else
- HALT_UNALLOC;
- return;
- case 5:
- if (type)
- HALT_UNALLOC;
- fcvtcpu (cpu);
- return;
- case 8: /* FRINTN etc. */
- case 9:
- case 10:
- case 11:
- case 12:
- case 14:
- case 15:
- do_FRINT (cpu);
- return;
- case 7:
- if (INSTR (22, 22))
- do_FCVT_double_to_half (cpu);
- else
- do_FCVT_single_to_half (cpu);
- return;
- case 13:
- HALT_NYI;
- default:
- HALT_UNALLOC;
- }
- }
- /* 32 bit signed int to float. */
- static void
- scvtf32 (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned sd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float
- (cpu, sd, (float) aarch64_get_reg_s32 (cpu, rn, NO_SP));
- }
- /* signed int to float. */
- static void
- scvtf (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned sd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_float
- (cpu, sd, (float) aarch64_get_reg_s64 (cpu, rn, NO_SP));
- }
- /* 32 bit signed int to double. */
- static void
- scvtd32 (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned sd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double
- (cpu, sd, (double) aarch64_get_reg_s32 (cpu, rn, NO_SP));
- }
- /* signed int to double. */
- static void
- scvtd (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned sd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_FP_double
- (cpu, sd, (double) aarch64_get_reg_s64 (cpu, rn, NO_SP));
- }
- static const float FLOAT_INT_MAX = (float) INT_MAX;
- static const float FLOAT_INT_MIN = (float) INT_MIN;
- static const double DOUBLE_INT_MAX = (double) INT_MAX;
- static const double DOUBLE_INT_MIN = (double) INT_MIN;
- static const float FLOAT_LONG_MAX = (float) LONG_MAX;
- static const float FLOAT_LONG_MIN = (float) LONG_MIN;
- static const double DOUBLE_LONG_MAX = (double) LONG_MAX;
- static const double DOUBLE_LONG_MIN = (double) LONG_MIN;
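- /* Note: these limits assume an LP64 host; on an ILP32 host
- LONG_MAX/LONG_MIN are 32-bit quantities and would not match the
- int64_t conversions performed below. */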
- #define UINT_MIN 0
- #define ULONG_MIN 0
- static const float FLOAT_UINT_MAX = (float) UINT_MAX;
- static const float FLOAT_UINT_MIN = (float) UINT_MIN;
- static const double DOUBLE_UINT_MAX = (double) UINT_MAX;
- static const double DOUBLE_UINT_MIN = (double) UINT_MIN;
- static const float FLOAT_ULONG_MAX = (float) ULONG_MAX;
- static const float FLOAT_ULONG_MIN = (float) ULONG_MIN;
- static const double DOUBLE_ULONG_MAX = (double) ULONG_MAX;
- static const double DOUBLE_ULONG_MIN = (double) ULONG_MIN;
- /* Check for FP exception conditions:
- NaN raises IO
- Infinity raises IO
- Out of Range raises IO and IX and saturates value
- Denormal raises ID and IX and sets to zero. */
- #define RAISE_EXCEPTIONS(F, VALUE, FTYPE, ITYPE) \
- do \
- { \
- switch (fpclassify (F)) \
- { \
- case FP_INFINITE: \
- case FP_NAN: \
- aarch64_set_FPSR (cpu, IO); \
- if (signbit (F)) \
- VALUE = ITYPE##_MIN; \
- else \
- VALUE = ITYPE##_MAX; \
- break; \
- \
- case FP_NORMAL: \
- if (F >= FTYPE##_##ITYPE##_MAX) \
- { \
- aarch64_set_FPSR_bits (cpu, IO | IX, IO | IX); \
- VALUE = ITYPE##_MAX; \
- } \
- else if (F <= FTYPE##_##ITYPE##_MIN) \
- { \
- aarch64_set_FPSR_bits (cpu, IO | IX, IO | IX); \
- VALUE = ITYPE##_MIN; \
- } \
- break; \
- \
- case FP_SUBNORMAL: \
- aarch64_set_FPSR_bits (cpu, IO | IX | ID, IX | ID); \
- VALUE = 0; \
- break; \
- \
- default: \
- case FP_ZERO: \
- VALUE = 0; \
- break; \
- } \
- } \
- while (0)
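- /* Worked example (illustrative): converting the float 3.0e9f to a
- signed 32-bit integer takes the FP_NORMAL branch, and since
- 3.0e9f >= FLOAT_INT_MAX the FPSR IO and IX bits are raised and
- the result saturates to INT_MAX. */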
- /* 32 bit convert float to signed int truncate towards zero. */
- static void
- fcvtszs32 (sim_cpu *cpu)
- {
- unsigned sn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- /* The C integer conversion truncates towards zero, as required. */
- float f = aarch64_get_FP_float (cpu, sn);
- int32_t value = (int32_t) f;
- RAISE_EXCEPTIONS (f, value, FLOAT, INT);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* Avoid sign extension to 64 bit. */
- aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) value);
- }
- /* 64 bit convert float to signed int truncate towards zero. */
- static void
- fcvtszs (sim_cpu *cpu)
- {
- unsigned sn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- float f = aarch64_get_FP_float (cpu, sn);
- int64_t value = (int64_t) f;
- RAISE_EXCEPTIONS (f, value, FLOAT, LONG);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_s64 (cpu, rd, NO_SP, value);
- }
- /* 32 bit convert double to signed int truncate towards zero. */
- static void
- fcvtszd32 (sim_cpu *cpu)
- {
- unsigned sn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- /* The C integer conversion truncates towards zero, as required. */
- double d = aarch64_get_FP_double (cpu, sn);
- int32_t value = (int32_t) d;
- RAISE_EXCEPTIONS (d, value, DOUBLE, INT);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* Avoid sign extension to 64 bit. */
- aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) value);
- }
- /* 64 bit convert double to signed int truncate towards zero. */
- static void
- fcvtszd (sim_cpu *cpu)
- {
- unsigned sn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- /* The C integer conversion truncates towards zero, as required. */
- double d = aarch64_get_FP_double (cpu, sn);
- int64_t value;
- value = (int64_t) d;
- RAISE_EXCEPTIONS (d, value, DOUBLE, LONG);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_s64 (cpu, rd, NO_SP, value);
- }
- static void
- do_fcvtzu (sim_cpu *cpu)
- {
- /* instr[31] = size: 32-bit (0), 64-bit (1)
- instr[30,23] = 00111100
- instr[22] = type: single (0)/ double (1)
- instr[21] = enable (0)/disable(1) precision
- instr[20,16] = 11001
- instr[15,10] = precision
- instr[9,5] = Rs
- instr[4,0] = Rd. */
- unsigned rs = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (30, 23, 0x3C);
- NYI_assert (20, 16, 0x19);
- if (INSTR (21, 21) != 1)
- /* Convert to fixed point. */
- HALT_NYI;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (31, 31))
- {
- /* Convert to unsigned 64-bit integer. */
- if (INSTR (22, 22))
- {
- double d = aarch64_get_FP_double (cpu, rs);
- uint64_t value = (uint64_t) d;
- /* Do not raise an exception if we have reached ULONG_MAX. */
- if (value != (1ULL << 63))
- RAISE_EXCEPTIONS (d, value, DOUBLE, ULONG);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value);
- }
- else
- {
- float f = aarch64_get_FP_float (cpu, rs);
- uint64_t value = (uint64_t) f;
- /* Do not raise an exception if we have reached ULONG_MAX. */
- if (value != (1ULL << 63))
- RAISE_EXCEPTIONS (f, value, FLOAT, ULONG);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value);
- }
- }
- else
- {
- uint32_t value;
- /* Convert to unsigned 32-bit integer. */
- if (INSTR (22, 22))
- {
- double d = aarch64_get_FP_double (cpu, rs);
- value = (uint32_t) d;
- /* Do not raise an exception if we have reached UINT_MAX. */
- if (value != (1UL << 31))
- RAISE_EXCEPTIONS (d, value, DOUBLE, UINT);
- }
- else
- {
- float f = aarch64_get_FP_float (cpu, rs);
- value = (uint32_t) f;
- /* Do not raise an exception if we have reached UINT_MAX. */
- if (value != (1UL << 31))
- RAISE_EXCEPTIONS (f, value, FLOAT, UINT);
- }
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value);
- }
- }
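- /* Note (assumption about the guards above): an out-of-range C
- conversion to an unsigned type is undefined behaviour, but on
- common x86 hosts it yields 0x80000000 / 0x8000000000000000, so
- values equal to those sentinels skip RAISE_EXCEPTIONS rather
- than mis-classifying a saturated result. */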
- static void
- do_UCVTF (sim_cpu *cpu)
- {
- /* instr[31] = size: 32-bit (0), 64-bit (1)
- instr[30,23] = 001 1110 0
- instr[22] = type: single (0)/ double (1)
- instr[21] = enable (0)/disable(1) precision
- instr[20,16] = 0 0011
- instr[15,10] = precision
- instr[9,5] = Rs
- instr[4,0] = Rd. */
- unsigned rs = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (30, 23, 0x3C);
- NYI_assert (20, 16, 0x03);
- if (INSTR (21, 21) != 1)
- HALT_NYI;
- /* FIXME: Add exception raising. */
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (31, 31))
- {
- uint64_t value = aarch64_get_reg_u64 (cpu, rs, NO_SP);
- if (INSTR (22, 22))
- aarch64_set_FP_double (cpu, rd, (double) value);
- else
- aarch64_set_FP_float (cpu, rd, (float) value);
- }
- else
- {
- uint32_t value = aarch64_get_reg_u32 (cpu, rs, NO_SP);
- if (INSTR (22, 22))
- aarch64_set_FP_double (cpu, rd, (double) value);
- else
- aarch64_set_FP_float (cpu, rd, (float) value);
- }
- }
- static void
- float_vector_move (sim_cpu *cpu)
- {
- /* instr[31,17] == 100 1111 0101 0111
- instr[16] ==> direction 0=> to GR, 1=> from GR
- instr[15,10] = 00 0000, ow ==> UNALLOC
- instr[9,5] ==> source
- instr[4,0] ==> dest. */
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 17, 0x4F57);
- if (INSTR (15, 10) != 0)
- HALT_UNALLOC;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (16, 16))
- aarch64_set_vec_u64 (cpu, rd, 1, aarch64_get_reg_u64 (cpu, rn, NO_SP));
- else
- aarch64_set_reg_u64 (cpu, rd, NO_SP, aarch64_get_vec_u64 (cpu, rn, 1));
- }
- static void
- dexSimpleFPIntegerConvert (sim_cpu *cpu)
- {
- /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30] = 0
- instr[29] = S : 0 ==> OK, 1 ==> UNALLOC
- instr[28,25] = 1111
- instr[24] = 0
- instr[23,22] = type : 00 ==> single, 01 ==> double, 1x ==> UNALLOC
- instr[21] = 1
- instr[20,19] = rmode
- instr[18,16] = opcode
- instr[15,10] = 10 0000 */
- uint32_t rmode_opcode;
- uint32_t size_type;
- uint32_t type;
- uint32_t size;
- uint32_t S;
- if (INSTR (31, 17) == 0x4F57)
- {
- float_vector_move (cpu);
- return;
- }
- size = INSTR (31, 31);
- S = INSTR (29, 29);
- if (S != 0)
- HALT_UNALLOC;
- type = INSTR (23, 22);
- if (type > 1)
- HALT_UNALLOC;
- rmode_opcode = INSTR (20, 16);
- size_type = (size << 1) | type; /* 0==32f, 1==32d, 2==64f, 3==64d. */
- switch (rmode_opcode)
- {
- case 2: /* SCVTF. */
- switch (size_type)
- {
- case 0: scvtf32 (cpu); return;
- case 1: scvtd32 (cpu); return;
- case 2: scvtf (cpu); return;
- case 3: scvtd (cpu); return;
- }
- case 6: /* FMOV GR, Vec. */
- switch (size_type)
- {
- case 0: gfmovs (cpu); return;
- case 3: gfmovd (cpu); return;
- default: HALT_UNALLOC;
- }
- case 7: /* FMOV vec, GR. */
- switch (size_type)
- {
- case 0: fgmovs (cpu); return;
- case 3: fgmovd (cpu); return;
- default: HALT_UNALLOC;
- }
- case 24: /* FCVTZS. */
- switch (size_type)
- {
- case 0: fcvtszs32 (cpu); return;
- case 1: fcvtszd32 (cpu); return;
- case 2: fcvtszs (cpu); return;
- case 3: fcvtszd (cpu); return;
- }
- case 25: do_fcvtzu (cpu); return;
- case 3: do_UCVTF (cpu); return;
- case 0: /* FCVTNS. */
- case 1: /* FCVTNU. */
- case 4: /* FCVTAS. */
- case 5: /* FCVTAU. */
- case 8: /* FCVTPS. */
- case 9: /* FCVTPU. */
- case 16: /* FCVTMS. */
- case 17: /* FCVTMU. */
- default:
- HALT_NYI;
- }
- }
- static void
- set_flags_for_float_compare (sim_cpu *cpu, float fvalue1, float fvalue2)
- {
- uint32_t flags;
- /* FIXME: Add exception raising. */
- if (isnan (fvalue1) || isnan (fvalue2))
- flags = C|V;
- else if (isinf (fvalue1) && isinf (fvalue2))
- {
- /* Subtracting two infinities of the same sign gives a NaN, so
- compare just the signs. signbit is used because isinf is only
- guaranteed to return a nonzero value, not +/-1. */
- int result = !!signbit (fvalue2) - !!signbit (fvalue1);
- if (result == 0)
- flags = Z|C;
- else if (result < 0)
- flags = N;
- else /* (result > 0). */
- flags = C;
- }
- else
- {
- float result = fvalue1 - fvalue2;
- if (result == 0.0)
- flags = Z|C;
- else if (result < 0)
- flags = N;
- else /* (result > 0). */
- flags = C;
- }
- aarch64_set_CPSR (cpu, flags);
- }
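- /* Resulting NZCV mapping (as in the ARM ARM): equal -> Z|C,
- less than -> N, greater than -> C, unordered -> C|V. */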
- static void
- fcmps (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- float fvalue1 = aarch64_get_FP_float (cpu, sn);
- float fvalue2 = aarch64_get_FP_float (cpu, sm);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- set_flags_for_float_compare (cpu, fvalue1, fvalue2);
- }
- /* Float compare to zero -- Invalid Operation exception
- only on signaling NaNs. */
- static void
- fcmpzs (sim_cpu *cpu)
- {
- unsigned sn = INSTR ( 9, 5);
- float fvalue1 = aarch64_get_FP_float (cpu, sn);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- set_flags_for_float_compare (cpu, fvalue1, 0.0f);
- }
- /* Float compare -- Invalid Operation exception on all NaNs. */
- static void
- fcmpes (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- float fvalue1 = aarch64_get_FP_float (cpu, sn);
- float fvalue2 = aarch64_get_FP_float (cpu, sm);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- set_flags_for_float_compare (cpu, fvalue1, fvalue2);
- }
- /* Float compare to zero -- Invalid Operation exception on all NaNs. */
- static void
- fcmpzes (sim_cpu *cpu)
- {
- unsigned sn = INSTR ( 9, 5);
- float fvalue1 = aarch64_get_FP_float (cpu, sn);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- set_flags_for_float_compare (cpu, fvalue1, 0.0f);
- }
- static void
- set_flags_for_double_compare (sim_cpu *cpu, double dval1, double dval2)
- {
- uint32_t flags;
- /* FIXME: Add exception raising. */
- if (isnan (dval1) || isnan (dval2))
- flags = C|V;
- else if (isinf (dval1) && isinf (dval2))
- {
- /* Subtracting two infinities of the same sign gives a NaN, so
- compare just the signs. signbit is used because isinf is only
- guaranteed to return a nonzero value, not +/-1. */
- int result = !!signbit (dval2) - !!signbit (dval1);
- if (result == 0)
- flags = Z|C;
- else if (result < 0)
- flags = N;
- else /* (result > 0). */
- flags = C;
- }
- else
- {
- double result = dval1 - dval2;
- if (result == 0.0)
- flags = Z|C;
- else if (result < 0)
- flags = N;
- else /* (result > 0). */
- flags = C;
- }
- aarch64_set_CPSR (cpu, flags);
- }
- /* Double compare -- Invalid Operation exception only on signaling NaNs. */
- static void
- fcmpd (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- double dvalue1 = aarch64_get_FP_double (cpu, sn);
- double dvalue2 = aarch64_get_FP_double (cpu, sm);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- set_flags_for_double_compare (cpu, dvalue1, dvalue2);
- }
- /* Double compare to zero -- Invalid Operation exception
- only on signaling NaNs. */
- static void
- fcmpzd (sim_cpu *cpu)
- {
- unsigned sn = INSTR ( 9, 5);
- double dvalue1 = aarch64_get_FP_double (cpu, sn);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- set_flags_for_double_compare (cpu, dvalue1, 0.0);
- }
- /* Double compare -- Invalid Operation exception on all NaNs. */
- static void
- fcmped (sim_cpu *cpu)
- {
- unsigned sm = INSTR (20, 16);
- unsigned sn = INSTR ( 9, 5);
- double dvalue1 = aarch64_get_FP_double (cpu, sn);
- double dvalue2 = aarch64_get_FP_double (cpu, sm);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- set_flags_for_double_compare (cpu, dvalue1, dvalue2);
- }
- /* Double compare to zero -- Invalid Operation exception on all NaNs. */
- static void
- fcmpzed (sim_cpu *cpu)
- {
- unsigned sn = INSTR ( 9, 5);
- double dvalue1 = aarch64_get_FP_double (cpu, sn);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- set_flags_for_double_compare (cpu, dvalue1, 0.0);
- }
- static void
- dexSimpleFPCompare (sim_cpu *cpu)
- {
- /* assert instr[28,25] == 1111
- instr[30:24:21:13,10] = 0011000
- instr[31] = M : 0 ==> OK, 1 ==> UNALLOC
- instr[29] ==> S : 0 ==> OK, 1 ==> UNALLOC
- instr[23,22] ==> type : 00 ==> single, 01 ==> double, 1x ==> UNALLOC
- instr[15,14] ==> op : 00 ==> OK, ow ==> UNALLOC
- instr[4,0] ==> opcode2 : 00000 ==> FCMP, 10000 ==> FCMPE,
- 01000 ==> FCMPZ, 11000 ==> FCMPEZ,
- ow ==> UNALLOC */
- uint32_t dispatch;
- uint32_t M_S = (INSTR (31, 31) << 1) | INSTR (29, 29);
- uint32_t type = INSTR (23, 22);
- uint32_t op = INSTR (15, 14);
- uint32_t op2_2_0 = INSTR (2, 0);
- if (op2_2_0 != 0)
- HALT_UNALLOC;
- if (M_S != 0)
- HALT_UNALLOC;
- if (type > 1)
- HALT_UNALLOC;
- if (op != 0)
- HALT_UNALLOC;
- /* dispatch on type and top 2 bits of opcode. */
- dispatch = (type << 2) | INSTR (4, 3);
- switch (dispatch)
- {
- case 0: fcmps (cpu); return;
- case 1: fcmpzs (cpu); return;
- case 2: fcmpes (cpu); return;
- case 3: fcmpzes (cpu); return;
- case 4: fcmpd (cpu); return;
- case 5: fcmpzd (cpu); return;
- case 6: fcmped (cpu); return;
- case 7: fcmpzed (cpu); return;
- }
- }
- static void
- do_scalar_FADDP (sim_cpu *cpu)
- {
- /* instr [31,23] = 0111 1110 0
- instr [22] = single(0)/double(1)
- instr [21,10] = 11 0000 1101 10
- instr [9,5] = Fn
- instr [4,0] = Fd. */
- unsigned Fn = INSTR (9, 5);
- unsigned Fd = INSTR (4, 0);
- NYI_assert (31, 23, 0x0FC);
- NYI_assert (21, 10, 0xC36);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- double val1 = aarch64_get_vec_double (cpu, Fn, 0);
- double val2 = aarch64_get_vec_double (cpu, Fn, 1);
- aarch64_set_FP_double (cpu, Fd, val1 + val2);
- }
- else
- {
- float val1 = aarch64_get_vec_float (cpu, Fn, 0);
- float val2 = aarch64_get_vec_float (cpu, Fn, 1);
- aarch64_set_FP_float (cpu, Fd, val1 + val2);
- }
- }
- /* Floating point absolute difference. */
- static void
- do_scalar_FABD (sim_cpu *cpu)
- {
- /* instr [31,23] = 0111 1110 1
- instr [22] = float(0)/double(1)
- instr [21] = 1
- instr [20,16] = Rm
- instr [15,10] = 1101 01
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 23, 0x0FD);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 10, 0x35);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- aarch64_set_FP_double (cpu, rd,
- fabs (aarch64_get_FP_double (cpu, rn)
- - aarch64_get_FP_double (cpu, rm)));
- else
- aarch64_set_FP_float (cpu, rd,
- fabsf (aarch64_get_FP_float (cpu, rn)
- - aarch64_get_FP_float (cpu, rm)));
- }
- static void
- do_scalar_CMGT (sim_cpu *cpu)
- {
- /* instr [31,21] = 0101 1110 111
- instr [20,16] = Rm
- instr [15,10] = 00 1101
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 21, 0x2F7);
- NYI_assert (15, 10, 0x0D);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u64 (cpu, rd, 0,
- aarch64_get_vec_u64 (cpu, rn, 0) >
- aarch64_get_vec_u64 (cpu, rm, 0) ? -1L : 0L);
- }
- static void
- do_scalar_USHR (sim_cpu *cpu)
- {
- /* instr [31,23] = 0111 1111 0
- instr [22,16] = shift amount
- instr [15,10] = 0000 01
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned amount = 128 - INSTR (22, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 23, 0x0FE);
- NYI_assert (15, 10, 0x01);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u64 (cpu, rd, 0,
- aarch64_get_vec_u64 (cpu, rn, 0) >> amount);
- }
- static void
- do_scalar_SSHL (sim_cpu *cpu)
- {
- /* instr [31,21] = 0101 1110 111
- instr [20,16] = Rm
- instr [15,10] = 0100 01
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- signed int shift = aarch64_get_vec_s8 (cpu, rm, 0);
- NYI_assert (31, 21, 0x2F7);
- NYI_assert (15, 10, 0x11);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (shift >= 0)
- aarch64_set_vec_s64 (cpu, rd, 0,
- aarch64_get_vec_s64 (cpu, rn, 0) << shift);
- else
- aarch64_set_vec_s64 (cpu, rd, 0,
- aarch64_get_vec_s64 (cpu, rn, 0) >> - shift);
- }
- /* Floating point scalar compare greater than or equal to 0. */
- static void
- do_scalar_FCMGE_zero (sim_cpu *cpu)
- {
- /* instr [31,23] = 0111 1110 1
- instr [22,22] = size
- instr [21,16] = 1000 00
- instr [15,10] = 1100 10
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned size = INSTR (22, 22);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 23, 0x0FD);
- NYI_assert (21, 16, 0x20);
- NYI_assert (15, 10, 0x32);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (size)
- aarch64_set_vec_u64 (cpu, rd, 0,
- aarch64_get_vec_double (cpu, rn, 0) >= 0.0 ? -1 : 0);
- else
- aarch64_set_vec_u32 (cpu, rd, 0,
- aarch64_get_vec_float (cpu, rn, 0) >= 0.0 ? -1 : 0);
- }
- /* Floating point scalar compare less than or equal to 0. */
- static void
- do_scalar_FCMLE_zero (sim_cpu *cpu)
- {
- /* instr [31,23] = 0111 1110 1
- instr [22,22] = size
- instr [21,16] = 1000 00
- instr [15,10] = 1101 10
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned size = INSTR (22, 22);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 23, 0x0FD);
- NYI_assert (21, 16, 0x20);
- NYI_assert (15, 10, 0x36);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (size)
- aarch64_set_vec_u64 (cpu, rd, 0,
- aarch64_get_vec_double (cpu, rn, 0) <= 0.0 ? -1 : 0);
- else
- aarch64_set_vec_u32 (cpu, rd, 0,
- aarch64_get_vec_float (cpu, rn, 0) <= 0.0 ? -1 : 0);
- }
- /* Floating point scalar compare greater than 0. */
- static void
- do_scalar_FCMGT_zero (sim_cpu *cpu)
- {
- /* instr [31,23] = 0101 1110 1
- instr [22,22] = size
- instr [21,16] = 1000 00
- instr [15,10] = 1100 10
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned size = INSTR (22, 22);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 23, 0x0BD);
- NYI_assert (21, 16, 0x20);
- NYI_assert (15, 10, 0x32);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (size)
- aarch64_set_vec_u64 (cpu, rd, 0,
- aarch64_get_vec_double (cpu, rn, 0) > 0.0 ? -1 : 0);
- else
- aarch64_set_vec_u32 (cpu, rd, 0,
- aarch64_get_vec_float (cpu, rn, 0) > 0.0 ? -1 : 0);
- }
- /* Floating point scalar compare equal to 0. */
- static void
- do_scalar_FCMEQ_zero (sim_cpu *cpu)
- {
- /* instr [31,23] = 0101 1110 1
- instr [22,22] = size
- instr [21,16] = 1000 00
- instr [15,10] = 1101 10
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned size = INSTR (22, 22);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 23, 0x0BD);
- NYI_assert (21, 16, 0x20);
- NYI_assert (15, 10, 0x36);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (size)
- aarch64_set_vec_u64 (cpu, rd, 0,
- aarch64_get_vec_double (cpu, rn, 0) == 0.0 ? -1 : 0);
- else
- aarch64_set_vec_u32 (cpu, rd, 0,
- aarch64_get_vec_float (cpu, rn, 0) == 0.0 ? -1 : 0);
- }
- /* Floating point scalar compare less than 0. */
- static void
- do_scalar_FCMLT_zero (sim_cpu *cpu)
- {
- /* instr [31,23] = 0101 1110 1
- instr [22,22] = size
- instr [21,16] = 1000 00
- instr [15,10] = 1110 10
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned size = INSTR (22, 22);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 23, 0x0BD);
- NYI_assert (21, 16, 0x20);
- NYI_assert (15, 10, 0x3A);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (size)
- aarch64_set_vec_u64 (cpu, rd, 0,
- aarch64_get_vec_double (cpu, rn, 0) < 0.0 ? -1 : 0);
- else
- aarch64_set_vec_u32 (cpu, rd, 0,
- aarch64_get_vec_float (cpu, rn, 0) < 0.0 ? -1 : 0);
- }
- static void
- do_scalar_shift (sim_cpu *cpu)
- {
- /* instr [31,23] = 0101 1111 0
- instr [22,16] = shift amount
- instr [15,10] = 0101 01 [SHL]
- instr [15,10] = 0000 01 [SSHR]
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned amount;
- NYI_assert (31, 23, 0x0BE);
- if (INSTR (22, 22) == 0)
- HALT_UNALLOC;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- switch (INSTR (15, 10))
- {
- case 0x01: /* SSHR */
- amount = 128 - INSTR (22, 16);
- aarch64_set_vec_s64 (cpu, rd, 0,
- aarch64_get_vec_s64 (cpu, rn, 0) >> amount);
- return;
- case 0x15: /* SHL */
- amount = INSTR (22, 16) - 64;
- aarch64_set_vec_u64 (cpu, rd, 0,
- aarch64_get_vec_u64 (cpu, rn, 0) << amount);
- return;
- default:
- HALT_NYI;
- }
- }
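- /* Encoding example (illustrative): for a 64-bit SSHR by 8,
- instr[22,16] = 0b1111000 = 120 and amount = 128 - 120 = 8; for
- SHL by 8, instr[22,16] = 0b1001000 = 72 and amount = 72 - 64 = 8. */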
- /* FCMEQ FCMGT FCMGE. */
- static void
- do_scalar_FCM (sim_cpu *cpu)
- {
- /* instr [31,30] = 01
- instr [29] = U
- instr [28,24] = 1 1110
- instr [23] = E
- instr [22] = size
- instr [21] = 1
- instr [20,16] = Rm
- instr [15,12] = 1110
- instr [11] = AC
- instr [10] = 1
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned EUac = (INSTR (23, 23) << 2) | (INSTR (29, 29) << 1) | INSTR (11, 11);
- unsigned result;
- float val1;
- float val2;
- NYI_assert (31, 30, 1);
- NYI_assert (28, 24, 0x1E);
- NYI_assert (21, 21, 1);
- NYI_assert (15, 12, 0xE);
- NYI_assert (10, 10, 1);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- double val1 = aarch64_get_FP_double (cpu, rn);
- double val2 = aarch64_get_FP_double (cpu, rm);
- switch (EUac)
- {
- case 0: /* 000 */
- result = val1 == val2;
- break;
- case 3: /* 011 */
- val1 = fabs (val1);
- val2 = fabs (val2);
- /* Fall through. */
- case 2: /* 010 */
- result = val1 >= val2;
- break;
- case 7: /* 111 */
- val1 = fabs (val1);
- val2 = fabs (val2);
- /* Fall through. */
- case 6: /* 110 */
- result = val1 > val2;
- break;
- default:
- HALT_UNALLOC;
- }
- aarch64_set_vec_u32 (cpu, rd, 0, result ? -1 : 0);
- return;
- }
- val1 = aarch64_get_FP_float (cpu, rn);
- val2 = aarch64_get_FP_float (cpu, rm);
- switch (EUac)
- {
- case 0: /* 000 */
- result = val1 == val2;
- break;
- case 3: /* 011 */
- val1 = fabsf (val1);
- val2 = fabsf (val2);
- /* Fall through. */
- case 2: /* 010 */
- result = val1 >= val2;
- break;
- case 7: /* 111 */
- val1 = fabsf (val1);
- val2 = fabsf (val2);
- /* Fall through. */
- case 6: /* 110 */
- result = val1 > val2;
- break;
- default:
- HALT_UNALLOC;
- }
- aarch64_set_vec_u32 (cpu, rd, 0, result ? -1 : 0);
- }
- /* An alias of DUP. */
- static void
- do_scalar_MOV (sim_cpu *cpu)
- {
- /* instr [31,21] = 0101 1110 000
- instr [20,16] = imm5
- instr [15,10] = 0000 01
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- unsigned index;
- NYI_assert (31, 21, 0x2F0);
- NYI_assert (15, 10, 0x01);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (16, 16))
- {
- /* 8-bit. */
- index = INSTR (20, 17);
- aarch64_set_vec_u8
- (cpu, rd, 0, aarch64_get_vec_u8 (cpu, rn, index));
- }
- else if (INSTR (17, 17))
- {
- /* 16-bit. */
- index = INSTR (20, 18);
- aarch64_set_vec_u16
- (cpu, rd, 0, aarch64_get_vec_u16 (cpu, rn, index));
- }
- else if (INSTR (18, 18))
- {
- /* 32-bit. */
- index = INSTR (20, 19);
- aarch64_set_vec_u32
- (cpu, rd, 0, aarch64_get_vec_u32 (cpu, rn, index));
- }
- else if (INSTR (19, 19))
- {
- /* 64-bit. */
- index = INSTR (20, 20);
- aarch64_set_vec_u64
- (cpu, rd, 0, aarch64_get_vec_u64 (cpu, rn, index));
- }
- else
- HALT_UNALLOC;
- }
- static void
- do_scalar_NEG (sim_cpu *cpu)
- {
- /* instr [31,10] = 0111 1110 1110 0000 1011 10
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 10, 0x1FB82E);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_vec_u64 (cpu, rd, 0, - aarch64_get_vec_u64 (cpu, rn, 0));
- }
- static void
- do_scalar_USHL (sim_cpu *cpu)
- {
- /* instr [31,21] = 0111 1110 111
- instr [20,16] = Rm
- instr [15,10] = 0100 01
- instr [9, 5] = Rn
- instr [4, 0] = Rd. */
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- signed int shift = aarch64_get_vec_s8 (cpu, rm, 0);
- NYI_assert (31, 21, 0x3F7);
- NYI_assert (15, 10, 0x11);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (shift >= 0)
- aarch64_set_vec_u64 (cpu, rd, 0, aarch64_get_vec_u64 (cpu, rn, 0) << shift);
- else
- aarch64_set_vec_u64 (cpu, rd, 0, aarch64_get_vec_u64 (cpu, rn, 0) >> - shift);
- }
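- /* Illustrative: the shift count is the signed low byte of Rm, so
- Rm = 0x04 shifts Rn left by four bits while Rm = 0xFC (-4)
- shifts it right by four. */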
- static void
- do_double_add (sim_cpu *cpu)
- {
- /* instr [31,21] = 0101 1110 111
- instr [20,16] = Fn
- instr [15,10] = 1000 01
- instr [9,5] = Fm
- instr [4,0] = Fd. */
- unsigned Fd;
- unsigned Fm;
- unsigned Fn;
- double val1;
- double val2;
- NYI_assert (31, 21, 0x2F7);
- NYI_assert (15, 10, 0x21);
- Fd = INSTR (4, 0);
- Fm = INSTR (9, 5);
- Fn = INSTR (20, 16);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- val1 = aarch64_get_FP_double (cpu, Fm);
- val2 = aarch64_get_FP_double (cpu, Fn);
- aarch64_set_FP_double (cpu, Fd, val1 + val2);
- }
- static void
- do_scalar_UCVTF (sim_cpu *cpu)
- {
- /* instr [31,23] = 0111 1110 0
- instr [22] = single(0)/double(1)
- instr [21,10] = 10 0001 1101 10
- instr [9,5] = rn
- instr [4,0] = rd. */
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- NYI_assert (31, 23, 0x0FC);
- NYI_assert (21, 10, 0x876);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (INSTR (22, 22))
- {
- uint64_t val = aarch64_get_vec_u64 (cpu, rn, 0);
- aarch64_set_vec_double (cpu, rd, 0, (double) val);
- }
- else
- {
- uint32_t val = aarch64_get_vec_u32 (cpu, rn, 0);
- aarch64_set_vec_float (cpu, rd, 0, (float) val);
- }
- }
- static void
- do_scalar_vec (sim_cpu *cpu)
- {
- /* instr [30] = 1. */
- /* instr [28,25] = 1111. */
- switch (INSTR (31, 23))
- {
- case 0xBC:
- switch (INSTR (15, 10))
- {
- case 0x01: do_scalar_MOV (cpu); return;
- case 0x39: do_scalar_FCM (cpu); return;
- case 0x3B: do_scalar_FCM (cpu); return;
- }
- break;
- case 0xBE: do_scalar_shift (cpu); return;
- case 0xFC:
- switch (INSTR (15, 10))
- {
- case 0x36:
- switch (INSTR (21, 16))
- {
- case 0x30: do_scalar_FADDP (cpu); return;
- case 0x21: do_scalar_UCVTF (cpu); return;
- }
- HALT_NYI;
- case 0x39: do_scalar_FCM (cpu); return;
- case 0x3B: do_scalar_FCM (cpu); return;
- }
- break;
- case 0xFD:
- switch (INSTR (15, 10))
- {
- case 0x0D: do_scalar_CMGT (cpu); return;
- case 0x11: do_scalar_USHL (cpu); return;
- case 0x2E: do_scalar_NEG (cpu); return;
- case 0x32: do_scalar_FCMGE_zero (cpu); return;
- case 0x35: do_scalar_FABD (cpu); return;
- case 0x36: do_scalar_FCMLE_zero (cpu); return;
- case 0x39: do_scalar_FCM (cpu); return;
- case 0x3B: do_scalar_FCM (cpu); return;
- default:
- HALT_NYI;
- }
- case 0xFE: do_scalar_USHR (cpu); return;
- case 0xBD:
- switch (INSTR (15, 10))
- {
- case 0x21: do_double_add (cpu); return;
- case 0x11: do_scalar_SSHL (cpu); return;
- case 0x32: do_scalar_FCMGT_zero (cpu); return;
- case 0x36: do_scalar_FCMEQ_zero (cpu); return;
- case 0x3A: do_scalar_FCMLT_zero (cpu); return;
- default:
- HALT_NYI;
- }
- default:
- HALT_NYI;
- }
- }
- static void
- dexAdvSIMD1 (sim_cpu *cpu)
- {
- /* instr [28,25] = 1 111. */
- /* We are currently only interested in the basic
- scalar fp routines which all have bit 30 = 0. */
- if (INSTR (30, 30))
- do_scalar_vec (cpu);
- /* instr[24] is set for FP data processing 3-source and clear for
- all other basic scalar fp instruction groups. */
- else if (INSTR (24, 24))
- dexSimpleFPDataProc3Source (cpu);
- /* instr[21] is clear for floating <-> fixed conversions and set for
- all other basic scalar fp instruction groups. */
- else if (!INSTR (21, 21))
- dexSimpleFPFixedConvert (cpu);
- /* instr[11,10] : 01 ==> cond compare, 10 ==> Data Proc 2 Source
- 11 ==> cond select, 00 ==> other. */
- else
- switch (INSTR (11, 10))
- {
- case 1: dexSimpleFPCondCompare (cpu); return;
- case 2: dexSimpleFPDataProc2Source (cpu); return;
- case 3: dexSimpleFPCondSelect (cpu); return;
- default:
- /* Now an ordered cascade of tests.
- FP immediate has instr [12] == 1.
- FP compare has instr [13] == 1.
- FP Data Proc 1 Source has instr [14] == 1.
- FP floating <--> integer conversions has instr [15] == 0. */
- if (INSTR (12, 12))
- dexSimpleFPImmediate (cpu);
- else if (INSTR (13, 13))
- dexSimpleFPCompare (cpu);
- else if (INSTR (14, 14))
- dexSimpleFPDataProc1Source (cpu);
- else if (!INSTR (15, 15))
- dexSimpleFPIntegerConvert (cpu);
- else
- /* If we get here then instr[15] == 1 which means UNALLOC. */
- HALT_UNALLOC;
- }
- }
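- /* The cascade above is easy to get lost in. A minimal illustrative
- sketch (a hypothetical helper, not used by the decoder): the same
- ordered tests applied to a raw instruction word, returning a class
- code instead of dispatching. It assumes bit 30 has already been
- checked to be clear. */
- static int
- example_classify_scalar_fp (uint32_t instr)
- {
- if ((instr >> 24) & 1)
- return 0; /* FP data processing 3-source. */
- if (!((instr >> 21) & 1))
- return 1; /* Floating <-> fixed conversion. */
- switch ((instr >> 10) & 3)
- {
- case 1: return 2; /* Conditional compare. */
- case 2: return 3; /* Data proc 2-source. */
- case 3: return 4; /* Conditional select. */
- default:
- if ((instr >> 12) & 1) return 5; /* FP immediate. */
- if ((instr >> 13) & 1) return 6; /* FP compare. */
- if ((instr >> 14) & 1) return 7; /* FP data proc 1-source. */
- if (!((instr >> 15) & 1)) return 8; /* FP <-> integer conversion. */
- return -1; /* UNALLOC. */
- }
- }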
- /* PC relative addressing. */
- static void
- pcadr (sim_cpu *cpu)
- {
- /* instr[31] = op : 0 ==> ADR, 1 ==> ADRP
- instr[30,29] = immlo
- instr[23,5] = immhi
- instr[4,0] = Rd. */
- uint64_t address;
- unsigned rd = INSTR (4, 0);
- uint32_t isPage = INSTR (31, 31);
- union { uint64_t u64; int64_t s64; } imm;
- uint64_t offset;
- imm.s64 = simm64 (aarch64_get_instr (cpu), 23, 5);
- offset = imm.u64;
- offset = (offset << 2) | INSTR (30, 29);
- address = aarch64_get_PC (cpu);
- if (isPage)
- {
- offset <<= 12;
- address &= ~0xfff;
- }
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, address + offset);
- }
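- /* A minimal sketch of the ADRP case above (hypothetical helper, not
- used by the decoder), assuming the signed 21 bit immediate
- immhi:immlo has already been assembled: the immediate selects a
- 4K page relative to the page holding the PC. */
- static uint64_t
- example_adrp_target (uint64_t pc, int64_t imm21)
- {
- return (pc & ~(uint64_t) 0xfff) + (((uint64_t) imm21) << 12);
- }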
- /* Specific decode and execute for group Data Processing Immediate. */
- static void
- dexPCRelAddressing (sim_cpu *cpu)
- {
- /* assert instr[28,24] = 10000. */
- pcadr (cpu);
- }
- /* Immediate logical.
- The bimm32/64 argument is constructed by replicating a 2, 4, 8,
- 16, 32 or 64 bit sequence pulled out at decode and possibly
- inverting it.
- N.B. the output register (dest) can normally be Xn or SP;
- the exception occurs for flag setting instructions, which may
- only use Xn for the output (dest). The input register can
- never be SP. */
- /* 32 bit and immediate. */
- static void
- and32 (sim_cpu *cpu, uint32_t bimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u32 (cpu, rn, NO_SP) & bimm);
- }
- /* 64 bit and immediate. */
- static void
- and64 (sim_cpu *cpu, uint64_t bimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u64 (cpu, rn, NO_SP) & bimm);
- }
- /* 32 bit and immediate set flags. */
- static void
- ands32 (sim_cpu *cpu, uint32_t bimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- uint32_t value2 = bimm;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2);
- set_flags_for_binop32 (cpu, value1 & value2);
- }
- /* 64 bit and immediate set flags. */
- static void
- ands64 (sim_cpu *cpu, uint64_t bimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- uint64_t value2 = bimm;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2);
- set_flags_for_binop64 (cpu, value1 & value2);
- }
- /* 32 bit exclusive or immediate. */
- static void
- eor32 (sim_cpu *cpu, uint32_t bimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u32 (cpu, rn, NO_SP) ^ bimm);
- }
- /* 64 bit exclusive or immediate. */
- static void
- eor64 (sim_cpu *cpu, uint64_t bimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u64 (cpu, rn, NO_SP) ^ bimm);
- }
- /* 32 bit or immediate. */
- static void
- orr32 (sim_cpu *cpu, uint32_t bimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u32 (cpu, rn, NO_SP) | bimm);
- }
- /* 64 bit or immediate. */
- static void
- orr64 (sim_cpu *cpu, uint64_t bimm)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, SP_OK,
- aarch64_get_reg_u64 (cpu, rn, NO_SP) | bimm);
- }
- /* Logical shifted register.
- These allow an optional LSL, ASR, LSR or ROR to the second source
- register with a count up to the register bit count.
- N.B register args may not be SP. */
- /* 32 bit AND shifted register. */
- static void
- and32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
- & shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count));
- }
- /* 64 bit AND shifted register. */
- static void
- and64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
- & shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count));
- }
- /* 32 bit AND shifted register setting flags. */
- static void
- ands32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- uint32_t value2 = shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- shift, count);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2);
- set_flags_for_binop32 (cpu, value1 & value2);
- }
- /* 64 bit AND shifted register setting flags. */
- static void
- ands64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- uint64_t value2 = shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
- shift, count);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2);
- set_flags_for_binop64 (cpu, value1 & value2);
- }
- /* 32 bit BIC shifted register. */
- static void
- bic32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
- & ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count));
- }
- /* 64 bit BIC shifted register. */
- static void
- bic64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
- & ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count));
- }
- /* 32 bit BIC shifted register setting flags. */
- static void
- bics32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- uint32_t value2 = ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- shift, count);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2);
- set_flags_for_binop32 (cpu, value1 & value2);
- }
- /* 64 bit BIC shifted register setting flags. */
- static void
- bics64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- uint64_t value2 = ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
- shift, count);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value1 & value2);
- set_flags_for_binop64 (cpu, value1 & value2);
- }
- /* 32 bit EON shifted register. */
- static void
- eon32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
- ^ ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count));
- }
- /* 64 bit EON shifted register. */
- static void
- eon64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
- ^ ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count));
- }
- /* 32 bit EOR shifted register. */
- static void
- eor32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
- ^ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count));
- }
- /* 64 bit EOR shifted register. */
- static void
- eor64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
- ^ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count));
- }
- /* 32 bit ORR shifted register. */
- static void
- orr32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
- | shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count));
- }
- /* 64 bit ORR shifted register. */
- static void
- orr64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
- | shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count));
- }
- /* 32 bit ORN shifted register. */
- static void
- orn32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
- | ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP), shift, count));
- }
- /* 64 bit ORN shifted register. */
- static void
- orn64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
- | ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP), shift, count));
- }
- static void
- dexLogicalImmediate (sim_cpu *cpu)
- {
- /* assert instr[28,23] = 100100
- instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30,29] = op : 0 ==> AND, 1 ==> ORR, 2 ==> EOR, 3 ==> ANDS
- instr[22] = N : used to construct immediate mask
- instr[21,16] = immr
- instr[15,10] = imms
- instr[9,5] = Rn
- instr[4,0] = Rd */
- /* 32 bit operations must have N = 0 or else we have an UNALLOC. */
- uint32_t size = INSTR (31, 31);
- uint32_t N = INSTR (22, 22);
- /* uint32_t immr = INSTR (21, 16); */
- /* uint32_t imms = INSTR (15, 10); */
- uint32_t index = INSTR (22, 10);
- uint64_t bimm64 = LITable [index];
- uint32_t dispatch = INSTR (30, 29);
- if (~size & N)
- HALT_UNALLOC;
- if (!bimm64)
- HALT_UNALLOC;
- if (size == 0)
- {
- uint32_t bimm = (uint32_t) bimm64;
- switch (dispatch)
- {
- case 0: and32 (cpu, bimm); return;
- case 1: orr32 (cpu, bimm); return;
- case 2: eor32 (cpu, bimm); return;
- case 3: ands32 (cpu, bimm); return;
- }
- }
- else
- {
- switch (dispatch)
- {
- case 0: and64 (cpu, bimm64); return;
- case 1: orr64 (cpu, bimm64); return;
- case 2: eor64 (cpu, bimm64); return;
- case 3: ands64 (cpu, bimm64); return;
- }
- }
- HALT_UNALLOC;
- }
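- /* An illustrative sketch of the replication described before and32
- (a hypothetical helper; the simulator itself just indexes the
- precomputed LITable): broadcast an ELEMENT bit pattern, ELEMENT a
- power of two between 2 and 64, across a 64 bit immediate. E.g.
- replicating 0x3f over 8 bit elements gives 0x3f3f3f3f3f3f3f3f. */
- static uint64_t
- example_replicate_bimm (uint64_t pattern, unsigned element)
- {
- uint64_t result = pattern;
- unsigned width;
- for (width = element; width < 64; width *= 2)
- result |= result << width;
- return result;
- }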
- /* Immediate move.
- The uimm argument is a 16 bit value to be inserted into the
- target register; the pos argument locates the 16 bit word in the
- dest register, i.e. it is in {0, 1} for 32 bit and {0, 1, 2,
- 3} for 64 bit.
- N.B. the register arg may not be SP, so it should be
- accessed using the setGZRegisterXXX accessors. */
- /* 32 bit move 16 bit immediate zero remaining shorts. */
- static void
- movz32 (sim_cpu *cpu, uint32_t val, uint32_t pos)
- {
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, val << (pos * 16));
- }
- /* 64 bit move 16 bit immediate zero remaining shorts. */
- static void
- movz64 (sim_cpu *cpu, uint32_t val, uint32_t pos)
- {
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, ((uint64_t) val) << (pos * 16));
- }
- /* 32 bit move 16 bit immediate negated. */
- static void
- movn32 (sim_cpu *cpu, uint32_t val, uint32_t pos)
- {
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, ((val << (pos * 16)) ^ 0xffffffffU));
- }
- /* 64 bit move 16 bit immediate negated. */
- static void
- movn64 (sim_cpu *cpu, uint32_t val, uint32_t pos)
- {
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, ((((uint64_t) val) << (pos * 16))
- ^ 0xffffffffffffffffULL));
- }
- /* 32 bit move 16 bit immediate keep remaining shorts. */
- static void
- movk32 (sim_cpu *cpu, uint32_t val, uint32_t pos)
- {
- unsigned rd = INSTR (4, 0);
- uint32_t current = aarch64_get_reg_u32 (cpu, rd, NO_SP);
- uint32_t value = val << (pos * 16);
- uint32_t mask = ~(0xffffU << (pos * 16));
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, (value | (current & mask)));
- }
- /* 64 bit move 16 bit immediate keep remaining shorts. */
- static void
- movk64 (sim_cpu *cpu, uint32_t val, uint32_t pos)
- {
- unsigned rd = INSTR (4, 0);
- uint64_t current = aarch64_get_reg_u64 (cpu, rd, NO_SP);
- uint64_t value = (uint64_t) val << (pos * 16);
- uint64_t mask = ~(0xffffULL << (pos * 16));
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, (value | (current & mask)));
- }
- static void
- dexMoveWideImmediate (sim_cpu *cpu)
- {
- /* assert instr[28:23] = 100101
- instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30,29] = op : 0 ==> MOVN, 1 ==> UNALLOC, 2 ==> MOVZ, 3 ==> MOVK
- instr[22,21] = shift : 00 == LSL#0, 01 = LSL#16, 10 = LSL#32, 11 = LSL#48
- instr[20,5] = uimm16
- instr[4,0] = Rd */
- /* N.B. the (multiple of 16) shift is applied by the called routine;
- we just pass the multiplier. */
- uint32_t imm;
- uint32_t size = INSTR (31, 31);
- uint32_t op = INSTR (30, 29);
- uint32_t shift = INSTR (22, 21);
- /* 32 bit can only shift by 0 or 1 lot of 16;
- anything else is an unallocated instruction. */
- if (size == 0 && (shift > 1))
- HALT_UNALLOC;
- if (op == 1)
- HALT_UNALLOC;
- imm = INSTR (20, 5);
- if (size == 0)
- {
- if (op == 0)
- movn32 (cpu, imm, shift);
- else if (op == 2)
- movz32 (cpu, imm, shift);
- else
- movk32 (cpu, imm, shift);
- }
- else
- {
- if (op == 0)
- movn64 (cpu, imm, shift);
- else if (op == 2)
- movz64 (cpu, imm, shift);
- else
- movk64 (cpu, imm, shift);
- }
- }
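- /* An illustrative sketch (hypothetical helper, not used elsewhere)
- of how a MOVZ/MOVK sequence materializes an arbitrary 64 bit
- constant using exactly the insert-at-16-bit-position semantics of
- movz64 and movk64 above. */
- static uint64_t
- example_movz_movk (uint16_t v0, uint16_t v1, uint16_t v2, uint16_t v3)
- {
- uint64_t reg = (uint64_t) v0; /* MOVZ Xd, #v0, LSL #0. */
- reg = (reg & ~(0xffffULL << 16)) | ((uint64_t) v1 << 16); /* MOVK LSL #16. */
- reg = (reg & ~(0xffffULL << 32)) | ((uint64_t) v2 << 32); /* MOVK LSL #32. */
- reg = (reg & ~(0xffffULL << 48)) | ((uint64_t) v3 << 48); /* MOVK LSL #48. */
- return reg;
- }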
- /* Bitfield operations.
- These take a pair of bit positions r and s which are in {0..31}
- or {0..63} depending on the instruction word size.
- N.B register args may not be SP. */
- /* OK, we start with ubfm, which just needs to pick
- some bits out of the source, zero the rest and write
- the result to dest. Just two logical shifts are needed. */
- /* 32 bit bitfield move, left and right of affected zeroed
- if r <= s Wd<s-r:0> = Wn<s:r> else Wd<32+s-r:32-r> = Wn<s:0>. */
- static void
- ubfm32 (sim_cpu *cpu, uint32_t r, uint32_t s)
- {
- unsigned rd;
- unsigned rn = INSTR (9, 5);
- uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- /* Pick either s+1-r or s+1 consecutive bits out of the original word. */
- if (r <= s)
- {
- /* 31:...:s:xxx:r:...:0 ==> 31:...:s-r:xxx:0.
- We want only bits s:xxx:r at the bottom of the word
- so we LSL bit s up to bit 31 i.e. by 31 - s
- and then we LSR to bring bit 31 down to bit s - r
- i.e. by 31 + r - s. */
- value <<= 31 - s;
- value >>= 31 + r - s;
- }
- else
- {
- /* 31:...:s:xxx:0 ==> 31:...:31-(r-1)+s:xxx:31-(r-1):...:0
- We want only bits s:xxx:0 starting at bit 31-(r-1)
- so we LSL bit s up to bit 31 i.e. by 31 - s
- and then we LSR to bring bit 31 down to 31-(r-1)+s
- i.e. by r - (s + 1). */
- value <<= 31 - s;
- value >>= r - (s + 1);
- }
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- rd = INSTR (4, 0);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value);
- }
- /* 64 bit bitfield move, left and right of affected zeroed
- if r <= s Wd<s-r:0> = Wn<s:r> else Wd<64+s-r:64-r> = Wn<s:0>. */
- static void
- ubfm (sim_cpu *cpu, uint32_t r, uint32_t s)
- {
- unsigned rd;
- unsigned rn = INSTR (9, 5);
- uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- if (r <= s)
- {
- /* 63:...:s:xxx:r:...:0 ==> 63:...:s-r:xxx:0.
- We want only bits s:xxx:r at the bottom of the word.
- So we LSL bit s up to bit 63 i.e. by 63 - s
- and then we LSR to bring bit 63 down to bit s - r
- i.e. by 63 + r - s. */
- value <<= 63 - s;
- value >>= 63 + r - s;
- }
- else
- {
- /* 63:...:s:xxx:0 ==> 63:...:63-(r-1)+s:xxx:63-(r-1):...:0.
- We want only bits s:xxx:0 starting at bit 63-(r-1).
- So we LSL bit s up to bit 63 i.e. by 63 - s
- and then we LSR to bring bit 63 down to 63-(r-1)+s
- i.e. by r - (s + 1). */
- value <<= 63 - s;
- value >>= r - (s + 1);
- }
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- rd = INSTR (4, 0);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value);
- }
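- /* A worked example of the two-shift trick used by ubfm32 and ubfm
- (hypothetical helper, illustrative only): extract bits s:r of
- VALUE into the low bits of the result with everything above
- zeroed, assuming r <= s < 64. E.g. bits 11:4 of 0xabcd are 0xbc. */
- static uint64_t
- example_extract_bits (uint64_t value, unsigned r, unsigned s)
- {
- value <<= 63 - s; /* Bit s is now bit 63; higher bits fall off. */
- value >>= 63 - s + r; /* Bit r lands at bit 0; zeros shift in above. */
- return value;
- }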
- /* The signed versions need to insert sign bits
- on the left of the inserted bit field, so we do
- much the same as the unsigned version except we
- use an arithmetic shift right -- this just means
- we need to operate on signed values. */
- /* 32 bit bitfield move, left of affected sign-extended, right zeroed. */
- /* If r <= s Wd<s-r:0> = Wn<s:r> else Wd<32+s-r:32-r> = Wn<s:0>. */
- static void
- sbfm32 (sim_cpu *cpu, uint32_t r, uint32_t s)
- {
- unsigned rd;
- unsigned rn = INSTR (9, 5);
- /* As per ubfm32 but use an ASR instead of an LSR. */
- int32_t value = aarch64_get_reg_s32 (cpu, rn, NO_SP);
- if (r <= s)
- {
- value <<= 31 - s;
- value >>= 31 + r - s;
- }
- else
- {
- value <<= 31 - s;
- value >>= r - (s + 1);
- }
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- rd = INSTR (4, 0);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) value);
- }
- /* 64 bit bitfield move, left of affected sign-extended, right zeroed. */
- /* If r <= s Wd<s-r:0> = Wn<s:r> else Wd<64+s-r:64-r> = Wn<s:0>. */
- static void
- sbfm (sim_cpu *cpu, uint32_t r, uint32_t s)
- {
- unsigned rd;
- unsigned rn = INSTR (9, 5);
- /* As per ubfm but use an ASR instead of an LSR. */
- int64_t value = aarch64_get_reg_s64 (cpu, rn, NO_SP);
- if (r <= s)
- {
- value <<= 63 - s;
- value >>= 63 + r - s;
- }
- else
- {
- value <<= 63 - s;
- value >>= r - (s + 1);
- }
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- rd = INSTR (4, 0);
- aarch64_set_reg_s64 (cpu, rd, NO_SP, value);
- }
- /* Finally, these versions leave non-affected bits
- as is, so we need to generate the bits as per
- ubfm and also generate a mask to pick the
- bits from the original and computed values. */
- /* 32 bit bitfield move, non-affected bits left as is.
- If r <= s Wd<s-r:0> = Wn<s:r> else Wd<32+s-r:32-r> = Wn<s:0>. */
- static void
- bfm32 (sim_cpu *cpu, uint32_t r, uint32_t s)
- {
- unsigned rn = INSTR (9, 5);
- uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- uint32_t mask = -1;
- unsigned rd;
- uint32_t value2;
- /* Pick either s+1-r or s+1 consecutive bits out of the original word. */
- if (r <= s)
- {
- /* 31:...:s:xxx:r:...:0 ==> 31:...:s-r:xxx:0.
- We want only bits s:xxx:r at the bottom of the word
- so we LSL bit s up to bit 31 i.e. by 31 - s
- and then we LSR to bring bit 31 down to bit s - r
- i.e. by 31 + r - s. */
- value <<= 31 - s;
- value >>= 31 + r - s;
- /* the mask must include the same bits. */
- mask <<= 31 - s;
- mask >>= 31 + r - s;
- }
- else
- {
- /* 31:...:s:xxx:0 ==> 31:...:31-(r-1)+s:xxx:31-(r-1):...:0.
- We want only bits s:xxx:0 starting at bit 31-(r-1)
- so we LSL bit s up to bit 31 i.e. by 31 - s
- and then we LSR to bring bit 31 down to 31-(r-1)+s
- i.e. by r - (s + 1). */
- value <<= 31 - s;
- value >>= r - (s + 1);
- /* The mask must include the same bits. */
- mask <<= 31 - s;
- mask >>= r - (s + 1);
- }
- rd = INSTR (4, 0);
- value2 = aarch64_get_reg_u32 (cpu, rd, NO_SP);
- value2 &= ~mask;
- value2 |= value;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, value2);
- }
- /* 64 bit bitfield move, non-affected bits left as is.
- If r <= s Wd<s-r:0> = Wn<s:r> else Wd<64+s-r:64-r> = Wn<s:0>. */
- static void
- bfm (sim_cpu *cpu, uint32_t r, uint32_t s)
- {
- unsigned rd;
- unsigned rn = INSTR (9, 5);
- uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- uint64_t mask = 0xffffffffffffffffULL;
- if (r <= s)
- {
- /* 63:...:s:xxx:r:...:0 ==> 63:...:s-r:xxx:0.
- We want only bits s:xxx:r at the bottom of the word
- so we LSL bit s up to bit 63 i.e. by 63 - s
- and then we LSR to bring bit 63 down to bit s - r
- i.e. by 63 + r - s. */
- value <<= 63 - s;
- value >>= 63 + r - s;
- /* The mask must include the same bits. */
- mask <<= 63 - s;
- mask >>= 63 + r - s;
- }
- else
- {
- /* 63:...:s:xxx:0 ==> 63:...:63-(r-1)+s:xxx:63-(r-1):...:0
- We want only bits s:xxx:0 starting at bit 63-(r-1)
- so we LSL bit s up to bit 63 i.e. by 63 - s
- and then we LSR to bring bit 63 down to 63-(r-1)+s
- i.e. by r - (s + 1). */
- value <<= 63 - s;
- value >>= r - (s + 1);
- /* The mask must include the same bits. */
- mask <<= 63 - s;
- mask >>= r - (s + 1);
- }
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- rd = INSTR (4, 0);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, (aarch64_get_reg_u64 (cpu, rd, NO_SP) & ~mask) | value);
- }
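- /* An illustrative counterpart to bfm32/bfm above (hypothetical
- helper; assumes pos + width <= 64): insert the low WIDTH bits of
- FIELD into DEST at bit position POS, leaving the other bits of
- DEST untouched, using the same mask idea. */
- static uint64_t
- example_insert_bits (uint64_t dest, uint64_t field, unsigned pos,
- unsigned width)
- {
- uint64_t mask = (width < 64 ? (1ULL << width) - 1 : ~0ULL) << pos;
- return (dest & ~mask) | ((field << pos) & mask);
- }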
- static void
- dexBitfieldImmediate (sim_cpu *cpu)
- {
- /* assert instr[28:23] = 100110
- instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30,29] = op : 0 ==> SBFM, 1 ==> BFM, 2 ==> UBFM, 3 ==> UNALLOC
- instr[22] = N : must be 0 for 32 bit, 1 for 64 bit ow UNALLOC
- instr[21,16] = immr : 0xxxxx for 32 bit, xxxxxx for 64 bit
- instr[15,10] = imms : 0xxxxx for 32 bit, xxxxxx for 64 bit
- instr[9,5] = Rn
- instr[4,0] = Rd */
- /* 32 bit operations must have N = 0 or else we have an UNALLOC. */
- uint32_t dispatch;
- uint32_t imms;
- uint32_t size = INSTR (31, 31);
- uint32_t N = INSTR (22, 22);
- /* 32 bit operations must have immr[5] = 0 and imms[5] = 0
- or else we have an UNALLOC. */
- uint32_t immr = INSTR (21, 16);
- if (~size & N)
- HALT_UNALLOC;
- if (!size && uimm (immr, 5, 5))
- HALT_UNALLOC;
- imms = INSTR (15, 10);
- if (!size && uimm (imms, 5, 5))
- HALT_UNALLOC;
- /* Switch on combined size and op. */
- dispatch = INSTR (31, 29);
- switch (dispatch)
- {
- case 0: sbfm32 (cpu, immr, imms); return;
- case 1: bfm32 (cpu, immr, imms); return;
- case 2: ubfm32 (cpu, immr, imms); return;
- case 4: sbfm (cpu, immr, imms); return;
- case 5: bfm (cpu, immr, imms); return;
- case 6: ubfm (cpu, immr, imms); return;
- default: HALT_UNALLOC;
- }
- }
- static void
- do_EXTR_32 (sim_cpu *cpu)
- {
- /* instr[31:21] = 00010011100
- instr[20,16] = Rm
- instr[15,10] = imms : 0xxxxx for 32 bit
- instr[9,5] = Rn
- instr[4,0] = Rd */
- unsigned rm = INSTR (20, 16);
- unsigned imms = INSTR (15, 10) & 31;
- unsigned rn = INSTR ( 9, 5);
- unsigned rd = INSTR ( 4, 0);
- uint32_t val1;
- uint32_t val2;
- val1 = aarch64_get_reg_u32 (cpu, rm, NO_SP);
- val1 >>= imms;
- /* A C shift by 32 is undefined and imms == 0 must yield Rm alone,
- so guard the degenerate case. Using 32 bit values also keeps the
- upper half of the destination zero, as a 32 bit op requires. */
- val2 = imms ? (aarch64_get_reg_u32 (cpu, rn, NO_SP) << (32 - imms)) : 0;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP, val1 | val2);
- }
- static void
- do_EXTR_64 (sim_cpu *cpu)
- {
- /* instr[31:21] = 10010011100
- instr[20,16] = Rm
- instr[15,10] = imms
- instr[9,5] = Rn
- instr[4,0] = Rd */
- unsigned rm = INSTR (20, 16);
- unsigned imms = INSTR (15, 10) & 63;
- unsigned rn = INSTR ( 9, 5);
- unsigned rd = INSTR ( 4, 0);
- uint64_t val;
- val = aarch64_get_reg_u64 (cpu, rm, NO_SP);
- val >>= imms;
- /* A C shift by 64 is undefined; for imms == 0 the result is just Rm. */
- if (imms)
- val |= (aarch64_get_reg_u64 (cpu, rn, NO_SP) << (64 - imms));
- aarch64_set_reg_u64 (cpu, rd, NO_SP, val);
- }
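- /* N.B. EXTR is a funnel shift, and with Rn == Rm it is a rotate
- right. A minimal sketch of the 64 bit case for 0 < imms < 64
- (hypothetical helper, mirroring do_EXTR_64 above): */
- static uint64_t
- example_extr64 (uint64_t rn, uint64_t rm, unsigned imms)
- {
- /* The result is bits imms+63:imms of the concatenation Rn:Rm. */
- return (rm >> imms) | (rn << (64 - imms));
- }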
- static void
- dexExtractImmediate (sim_cpu *cpu)
- {
- /* assert instr[28:23] = 100111
- instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30,29] = op21 : 0 ==> EXTR, 1,2,3 ==> UNALLOC
- instr[22] = N : must be 0 for 32 bit, 1 for 64 bit or UNALLOC
- instr[21] = op0 : must be 0 or UNALLOC
- instr[20,16] = Rm
- instr[15,10] = imms : 0xxxxx for 32 bit, xxxxxx for 64 bit
- instr[9,5] = Rn
- instr[4,0] = Rd */
- /* 32 bit operations must have N = 0 or else we have an UNALLOC. */
- /* 64 bit operations must have N = 1 or else we have an UNALLOC. */
- uint32_t dispatch;
- uint32_t size = INSTR (31, 31);
- uint32_t N = INSTR (22, 22);
- /* 32 bit operations must have imms[5] = 0
- or else we have an UNALLOC. */
- uint32_t imms = INSTR (15, 10);
- if (size ^ N)
- HALT_UNALLOC;
- if (!size && uimm (imms, 5, 5))
- HALT_UNALLOC;
- /* Switch on combined size and op. */
- dispatch = INSTR (31, 29);
- if (dispatch == 0)
- do_EXTR_32 (cpu);
- else if (dispatch == 4)
- do_EXTR_64 (cpu);
- else if (dispatch == 1)
- HALT_NYI;
- else
- HALT_UNALLOC;
- }
- static void
- dexDPImm (sim_cpu *cpu)
- {
- /* uint32_t group = dispatchGroup (aarch64_get_instr (cpu));
- assert group == GROUP_DPIMM_1000 || group == GROUP_DPIMM_1001
- bits [25,23] of a DPImm are the secondary dispatch vector. */
- uint32_t group2 = dispatchDPImm (aarch64_get_instr (cpu));
- switch (group2)
- {
- case DPIMM_PCADR_000:
- case DPIMM_PCADR_001:
- dexPCRelAddressing (cpu);
- return;
- case DPIMM_ADDSUB_010:
- case DPIMM_ADDSUB_011:
- dexAddSubtractImmediate (cpu);
- return;
- case DPIMM_LOG_100:
- dexLogicalImmediate (cpu);
- return;
- case DPIMM_MOV_101:
- dexMoveWideImmediate (cpu);
- return;
- case DPIMM_BITF_110:
- dexBitfieldImmediate (cpu);
- return;
- case DPIMM_EXTR_111:
- dexExtractImmediate (cpu);
- return;
- default:
- /* Should never reach here. */
- HALT_NYI;
- }
- }
- static void
- dexLoadUnscaledImmediate (sim_cpu *cpu)
- {
- /* instr[29,24] == 111_00
- instr[21] == 0
- instr[11,10] == 00
- instr[31,30] = size
- instr[26] = V
- instr[23,22] = opc
- instr[20,12] = simm9
- instr[9,5] = rn may be SP. */
- /* unsigned rt = INSTR (4, 0); */
- uint32_t V = INSTR (26, 26);
- uint32_t dispatch = ((INSTR (31, 30) << 2) | INSTR (23, 22));
- int32_t imm = simm32 (aarch64_get_instr (cpu), 20, 12);
- if (!V)
- {
- /* GReg operations. */
- switch (dispatch)
- {
- case 0: sturb (cpu, imm); return;
- case 1: ldurb32 (cpu, imm); return;
- case 2: ldursb64 (cpu, imm); return;
- case 3: ldursb32 (cpu, imm); return;
- case 4: sturh (cpu, imm); return;
- case 5: ldurh32 (cpu, imm); return;
- case 6: ldursh64 (cpu, imm); return;
- case 7: ldursh32 (cpu, imm); return;
- case 8: stur32 (cpu, imm); return;
- case 9: ldur32 (cpu, imm); return;
- case 10: ldursw (cpu, imm); return;
- case 12: stur64 (cpu, imm); return;
- case 13: ldur64 (cpu, imm); return;
- case 14:
- /* PRFUM NYI. */
- HALT_NYI;
- default:
- case 11:
- case 15:
- HALT_UNALLOC;
- }
- }
- /* FReg operations. */
- switch (dispatch)
- {
- case 2: fsturq (cpu, imm); return;
- case 3: fldurq (cpu, imm); return;
- case 8: fsturs (cpu, imm); return;
- case 9: fldurs (cpu, imm); return;
- case 12: fsturd (cpu, imm); return;
- case 13: fldurd (cpu, imm); return;
- case 0: /* STUR 8 bit FP. */
- case 1: /* LDUR 8 bit FP. */
- case 4: /* STUR 16 bit FP. */
- case 5: /* LDUR 16 bit FP. */
- HALT_NYI;
- default:
- case 6:
- case 7:
- case 10:
- case 11:
- case 14:
- case 15:
- HALT_UNALLOC;
- }
- }
- /* N.B. A preliminary note regarding all the ldrs<x>32
- instructions.
- The signed value loaded by these instructions is cast to unsigned
- before being assigned via aarch64_set_reg_u64 (cpu, N) i.e. to the
- 64 bit element of the GReg union. This performs a 32 bit sign extension
- (as required) but avoids 64 bit sign extension, thus ensuring that the
- top half of the register word is zero. This is what the spec demands
- when a 32 bit load occurs. */
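- /* An illustrative demonstration of the note above (hypothetical
- helper, not used by the loads): casting through uint32_t
- sign-extends only to bit 31, so the upper half of the 64 bit
- register image stays zero. */
- static uint64_t
- example_load_signed_byte (int8_t byte)
- {
- uint32_t w = (int32_t) byte; /* Sign extension stops at bit 31... */
- return (uint64_t) w; /* ...so the upper half is zero. */
- }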
- /* 32 bit load sign-extended byte scaled unsigned 12 bit. */
- static void
- ldrsb32_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned int rn = INSTR (9, 5);
- unsigned int rt = INSTR (4, 0);
- /* The target register may not be SP but the source may be;
- there is no scaling required for a byte load. */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset;
- /* Cast via uint32_t so that the top half of the register is zeroed,
- as the preliminary note above requires. */
- aarch64_set_reg_u64 (cpu, rt, NO_SP,
- (uint32_t) aarch64_get_mem_s8 (cpu, address));
- }
- /* 32 bit load sign-extended byte scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- ldrsb32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned int rm = INSTR (20, 16);
- unsigned int rn = INSTR (9, 5);
- unsigned int rt = INSTR (4, 0);
- /* rn may reference SP, rm and rt must reference ZR. */
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t displacement = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- extension);
- /* There is no scaling required for a byte load. */
- aarch64_set_reg_u64
- (cpu, rt, NO_SP, (uint32_t) aarch64_get_mem_s8 (cpu, address
- + displacement));
- }
- /* 32 bit load sign-extended byte unscaled signed 9 bit with
- pre- or post-writeback. */
- static void
- ldrsb32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- uint64_t address;
- unsigned int rn = INSTR (9, 5);
- unsigned int rt = INSTR (4, 0);
- if (rn == rt && wb != NoWriteBack)
- HALT_UNALLOC;
- address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb == Pre)
- address += offset;
- aarch64_set_reg_u64 (cpu, rt, NO_SP,
- (uint32_t) aarch64_get_mem_s8 (cpu, address));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
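- /* The writeback convention used by all the *_wb routines here, as a
- stand-alone sketch (hypothetical helper): Pre adds the offset
- before the access, Post after it, and the final address is written
- back to the base register unless wb == NoWriteBack. */
- static uint64_t
- example_wb_address (uint64_t base, int32_t offset, WriteBack wb,
- uint64_t *access_address)
- {
- uint64_t address = base;
- if (wb == Pre)
- address += offset;
- *access_address = address; /* Address used for the memory access. */
- if (wb == Post)
- address += offset;
- return address; /* Value to write back when wb != NoWriteBack. */
- }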
- /* 8 bit store scaled. */
- static void
- fstrb_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned st = INSTR (4, 0);
- unsigned rn = INSTR (9, 5);
- aarch64_set_mem_u8 (cpu,
- aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
- aarch64_get_vec_u8 (cpu, st, 0));
- }
- /* 8 bit store scaled or unscaled zero- or
- sign-extended 8-bit register offset. */
- static void
- fstrb_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned st = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- extension);
- uint64_t displacement = extended; /* A byte store is never scaled. */
- aarch64_set_mem_u8
- (cpu, address + displacement, aarch64_get_vec_u8 (cpu, st, 0));
- }
- /* 16 bit store scaled. */
- static void
- fstrh_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned st = INSTR (4, 0);
- unsigned rn = INSTR (9, 5);
- aarch64_set_mem_u16
- (cpu,
- aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 16),
- aarch64_get_vec_u16 (cpu, st, 0));
- }
- /* 16 bit store scaled or unscaled zero-
- or sign-extended 16-bit register offset. */
- static void
- fstrh_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned st = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- extension);
- uint64_t displacement = OPT_SCALE (extended, 16, scaling);
- aarch64_set_mem_u16
- (cpu, address + displacement, aarch64_get_vec_u16 (cpu, st, 0));
- }
- /* 32 bit store scaled unsigned 12 bit. */
- static void
- fstrs_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned st = INSTR (4, 0);
- unsigned rn = INSTR (9, 5);
- aarch64_set_mem_u32
- (cpu,
- aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 32),
- aarch64_get_vec_u32 (cpu, st, 0));
- }
- /* 32 bit store unscaled signed 9 bit with pre- or post-writeback. */
- static void
- fstrs_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned st = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- aarch64_set_mem_u32 (cpu, address, aarch64_get_vec_u32 (cpu, st, 0));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 32 bit store scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- fstrs_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned st = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- extension);
- uint64_t displacement = OPT_SCALE (extended, 32, scaling);
- aarch64_set_mem_u32
- (cpu, address + displacement, aarch64_get_vec_u32 (cpu, st, 0));
- }
- /* 64 bit store scaled unsigned 12 bit. */
- static void
- fstrd_abs (sim_cpu *cpu, uint32_t offset)
- {
- unsigned st = INSTR (4, 0);
- unsigned rn = INSTR (9, 5);
- aarch64_set_mem_u64
- (cpu,
- aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 64),
- aarch64_get_vec_u64 (cpu, st, 0));
- }
- /* 64 bit store unscaled signed 9 bit with pre- or post-writeback. */
- static void
- fstrd_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (9, 5);
- unsigned st = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- aarch64_set_mem_u64 (cpu, address, aarch64_get_vec_u64 (cpu, st, 0));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 64 bit store scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- fstrd_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned st = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- extension);
- uint64_t displacement = OPT_SCALE (extended, 64, scaling);
- aarch64_set_mem_u64
- (cpu, address + displacement, aarch64_get_vec_u64 (cpu, st, 0));
- }
- /* 128 bit store scaled unsigned 12 bit. */
- static void
- fstrq_abs (sim_cpu *cpu, uint32_t offset)
- {
- FRegister a;
- unsigned st = INSTR (4, 0);
- unsigned rn = INSTR (9, 5);
- uint64_t addr;
- aarch64_get_FP_long_double (cpu, st, & a);
- addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 128);
- aarch64_set_mem_long_double (cpu, addr, a);
- }
- /* 128 bit store unscaled signed 9 bit with pre- or post-writeback. */
- static void
- fstrq_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- FRegister a;
- unsigned rn = INSTR (9, 5);
- unsigned st = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- if (wb != Post)
- address += offset;
- aarch64_get_FP_long_double (cpu, st, & a);
- aarch64_set_mem_long_double (cpu, address, a);
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rn, SP_OK, address);
- }
- /* 128 bit store scaled or unscaled zero-
- or sign-extended 32-bit register offset. */
- static void
- fstrq_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned st = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
- extension);
- uint64_t displacement = OPT_SCALE (extended, 128, scaling);
- FRegister a;
- aarch64_get_FP_long_double (cpu, st, & a);
- aarch64_set_mem_long_double (cpu, address + displacement, a);
- }
- static void
- dexLoadImmediatePrePost (sim_cpu *cpu)
- {
- /* instr[31,30] = size
- instr[29,27] = 111
- instr[26] = V
- instr[25,24] = 00
- instr[23,22] = opc
- instr[21] = 0
- instr[20,12] = simm9
- instr[11] = wb : 0 ==> Post, 1 ==> Pre
- instr[10] = 0
- instr[9,5] = Rn may be SP.
- instr[4,0] = Rt */
- uint32_t V = INSTR (26, 26);
- uint32_t dispatch = ((INSTR (31, 30) << 2) | INSTR (23, 22));
- int32_t imm = simm32 (aarch64_get_instr (cpu), 20, 12);
- WriteBack wb = INSTR (11, 11);
- if (!V)
- {
- /* GReg operations. */
- switch (dispatch)
- {
- case 0: strb_wb (cpu, imm, wb); return;
- case 1: ldrb32_wb (cpu, imm, wb); return;
- case 2: ldrsb_wb (cpu, imm, wb); return;
- case 3: ldrsb32_wb (cpu, imm, wb); return;
- case 4: strh_wb (cpu, imm, wb); return;
- case 5: ldrh32_wb (cpu, imm, wb); return;
- case 6: ldrsh64_wb (cpu, imm, wb); return;
- case 7: ldrsh32_wb (cpu, imm, wb); return;
- case 8: str32_wb (cpu, imm, wb); return;
- case 9: ldr32_wb (cpu, imm, wb); return;
- case 10: ldrsw_wb (cpu, imm, wb); return;
- case 12: str_wb (cpu, imm, wb); return;
- case 13: ldr_wb (cpu, imm, wb); return;
- default:
- case 11:
- case 14:
- case 15:
- HALT_UNALLOC;
- }
- }
- /* FReg operations. */
- switch (dispatch)
- {
- case 2: fstrq_wb (cpu, imm, wb); return;
- case 3: fldrq_wb (cpu, imm, wb); return;
- case 8: fstrs_wb (cpu, imm, wb); return;
- case 9: fldrs_wb (cpu, imm, wb); return;
- case 12: fstrd_wb (cpu, imm, wb); return;
- case 13: fldrd_wb (cpu, imm, wb); return;
- case 0: /* STUR 8 bit FP. */
- case 1: /* LDUR 8 bit FP. */
- case 4: /* STUR 16 bit FP. */
- case 5: /* LDUR 16 bit FP. */
- HALT_NYI;
- default:
- case 6:
- case 7:
- case 10:
- case 11:
- case 14:
- case 15:
- HALT_UNALLOC;
- }
- }
- static void
- dexLoadRegisterOffset (sim_cpu *cpu)
- {
- /* instr[31,30] = size
- instr[29,27] = 111
- instr[26] = V
- instr[25,24] = 00
- instr[23,22] = opc
- instr[21] = 1
- instr[20,16] = rm
- instr[15,13] = option : 010 ==> UXTW, 011 ==> UXTX/LSL,
- 110 ==> SXTW, 111 ==> SXTX,
- ow ==> RESERVED
- instr[12] = scaled
- instr[11,10] = 10
- instr[9,5] = rn
- instr[4,0] = rt. */
- uint32_t V = INSTR (26, 26);
- uint32_t dispatch = ((INSTR (31, 30) << 2) | INSTR (23, 22));
- Scaling scale = INSTR (12, 12);
- Extension extensionType = INSTR (15, 13);
- /* Check for illegal extension types. */
- if (uimm (extensionType, 1, 1) == 0)
- HALT_UNALLOC;
- if (extensionType == UXTX || extensionType == SXTX)
- extensionType = NoExtension;
- if (!V)
- {
- /* GReg operations. */
- switch (dispatch)
- {
- case 0: strb_scale_ext (cpu, scale, extensionType); return;
- case 1: ldrb32_scale_ext (cpu, scale, extensionType); return;
- case 2: ldrsb_scale_ext (cpu, scale, extensionType); return;
- case 3: ldrsb32_scale_ext (cpu, scale, extensionType); return;
- case 4: strh_scale_ext (cpu, scale, extensionType); return;
- case 5: ldrh32_scale_ext (cpu, scale, extensionType); return;
- case 6: ldrsh_scale_ext (cpu, scale, extensionType); return;
- case 7: ldrsh32_scale_ext (cpu, scale, extensionType); return;
- case 8: str32_scale_ext (cpu, scale, extensionType); return;
- case 9: ldr32_scale_ext (cpu, scale, extensionType); return;
- case 10: ldrsw_scale_ext (cpu, scale, extensionType); return;
- case 12: str_scale_ext (cpu, scale, extensionType); return;
- case 13: ldr_scale_ext (cpu, scale, extensionType); return;
- case 14: prfm_scale_ext (cpu, scale, extensionType); return;
- default:
- case 11:
- case 15:
- HALT_UNALLOC;
- }
- }
- /* FReg operations. */
- switch (dispatch)
- {
- case 1: /* LDUR 8 bit FP. */
- HALT_NYI;
- case 3: fldrq_scale_ext (cpu, scale, extensionType); return;
- case 5: /* LDUR 16 bit FP. */
- HALT_NYI;
- case 9: fldrs_scale_ext (cpu, scale, extensionType); return;
- case 13: fldrd_scale_ext (cpu, scale, extensionType); return;
- case 0: fstrb_scale_ext (cpu, scale, extensionType); return;
- case 2: fstrq_scale_ext (cpu, scale, extensionType); return;
- case 4: fstrh_scale_ext (cpu, scale, extensionType); return;
- case 8: fstrs_scale_ext (cpu, scale, extensionType); return;
- case 12: fstrd_scale_ext (cpu, scale, extensionType); return;
- default:
- case 6:
- case 7:
- case 10:
- case 11:
- case 14:
- case 15:
- HALT_UNALLOC;
- }
- }
- static void
- dexLoadUnsignedImmediate (sim_cpu *cpu)
- {
- /* instr[29,24] == 111_01
- instr[31,30] = size
- instr[26] = V
- instr[23,22] = opc
- instr[21,10] = uimm12 : unsigned immediate offset
- instr[9,5] = rn may be SP.
- instr[4,0] = rt. */
- uint32_t V = INSTR (26,26);
- uint32_t dispatch = ((INSTR (31, 30) << 2) | INSTR (23, 22));
- uint32_t imm = INSTR (21, 10);
- if (!V)
- {
- /* GReg operations. */
- switch (dispatch)
- {
- case 0: strb_abs (cpu, imm); return;
- case 1: ldrb32_abs (cpu, imm); return;
- case 2: ldrsb_abs (cpu, imm); return;
- case 3: ldrsb32_abs (cpu, imm); return;
- case 4: strh_abs (cpu, imm); return;
- case 5: ldrh32_abs (cpu, imm); return;
- case 6: ldrsh_abs (cpu, imm); return;
- case 7: ldrsh32_abs (cpu, imm); return;
- case 8: str32_abs (cpu, imm); return;
- case 9: ldr32_abs (cpu, imm); return;
- case 10: ldrsw_abs (cpu, imm); return;
- case 12: str_abs (cpu, imm); return;
- case 13: ldr_abs (cpu, imm); return;
- case 14: prfm_abs (cpu, imm); return;
- default:
- case 11:
- case 15:
- HALT_UNALLOC;
- }
- }
- /* FReg operations. */
- switch (dispatch)
- {
- case 0: fstrb_abs (cpu, imm); return;
- case 4: fstrh_abs (cpu, imm); return;
- case 8: fstrs_abs (cpu, imm); return;
- case 12: fstrd_abs (cpu, imm); return;
- case 2: fstrq_abs (cpu, imm); return;
- case 1: fldrb_abs (cpu, imm); return;
- case 5: fldrh_abs (cpu, imm); return;
- case 9: fldrs_abs (cpu, imm); return;
- case 13: fldrd_abs (cpu, imm); return;
- case 3: fldrq_abs (cpu, imm); return;
- default:
- case 6:
- case 7:
- case 10:
- case 11:
- case 14:
- case 15:
- HALT_UNALLOC;
- }
- }
- static void
- dexLoadExclusive (sim_cpu *cpu)
- {
- /* assert instr[29:24] = 001000;
- instr[31,30] = size
- instr[23] = 0 if exclusive
- instr[22] = L : 1 if load, 0 if store
- instr[21] = 1 if pair
- instr[20,16] = Rs
- instr[15] = o0 : 1 if ordered
- instr[14,10] = Rt2
- instr[9,5] = Rn
- instr[4,0] = Rt. */
- switch (INSTR (22, 21))
- {
- case 2: ldxr (cpu); return;
- case 0: stxr (cpu); return;
- default: HALT_NYI;
- }
- }
- static void
- dexLoadOther (sim_cpu *cpu)
- {
- uint32_t dispatch;
- /* instr[29,25] = 111_0
- instr[24] == 0 ==> dispatch, 1 ==> ldst reg unsigned immediate
- instr[21] and instr[11,10] form the secondary dispatch. */
- if (INSTR (24, 24))
- {
- dexLoadUnsignedImmediate (cpu);
- return;
- }
- dispatch = ((INSTR (21, 21) << 2) | INSTR (11, 10));
- switch (dispatch)
- {
- case 0: dexLoadUnscaledImmediate (cpu); return;
- case 1: dexLoadImmediatePrePost (cpu); return;
- case 3: dexLoadImmediatePrePost (cpu); return;
- case 6: dexLoadRegisterOffset (cpu); return;
- default:
- case 2:
- case 4:
- case 5:
- case 7:
- HALT_NYI;
- }
- }
- static void
- store_pair_u32 (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (14, 10);
- unsigned rd = INSTR (9, 5);
- unsigned rm = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
- if ((rn == rd || rm == rd) && wb != NoWriteBack)
- HALT_UNALLOC; /* ??? */
- offset <<= 2;
- if (wb != Post)
- address += offset;
- aarch64_set_mem_u32 (cpu, address,
- aarch64_get_reg_u32 (cpu, rm, NO_SP));
- aarch64_set_mem_u32 (cpu, address + 4,
- aarch64_get_reg_u32 (cpu, rn, NO_SP));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rd, SP_OK, address);
- }
- static void
- store_pair_u64 (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (14, 10);
- unsigned rd = INSTR (9, 5);
- unsigned rm = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
- if ((rn == rd || rm == rd) && wb != NoWriteBack)
- HALT_UNALLOC; /* ??? */
- offset <<= 3;
- if (wb != Post)
- address += offset;
- aarch64_set_mem_u64 (cpu, address,
- aarch64_get_reg_u64 (cpu, rm, NO_SP));
- aarch64_set_mem_u64 (cpu, address + 8,
- aarch64_get_reg_u64 (cpu, rn, NO_SP));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rd, SP_OK, address);
- }
- static void
- load_pair_u32 (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (14, 10);
- unsigned rd = INSTR (9, 5);
- unsigned rm = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
- /* Treat this as unalloc to make sure we don't do it. */
- if (rn == rm)
- HALT_UNALLOC;
- offset <<= 2;
- if (wb != Post)
- address += offset;
- aarch64_set_reg_u64 (cpu, rm, SP_OK, aarch64_get_mem_u32 (cpu, address));
- aarch64_set_reg_u64 (cpu, rn, SP_OK, aarch64_get_mem_u32 (cpu, address + 4));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rd, SP_OK, address);
- }
- static void
- load_pair_s32 (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (14, 10);
- unsigned rd = INSTR (9, 5);
- unsigned rm = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
- /* Treat this as unalloc to make sure we don't do it. */
- if (rn == rm)
- HALT_UNALLOC;
- offset <<= 2;
- if (wb != Post)
- address += offset;
- aarch64_set_reg_s64 (cpu, rm, SP_OK, aarch64_get_mem_s32 (cpu, address));
- aarch64_set_reg_s64 (cpu, rn, SP_OK, aarch64_get_mem_s32 (cpu, address + 4));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rd, SP_OK, address);
- }
- static void
- load_pair_u64 (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (14, 10);
- unsigned rd = INSTR (9, 5);
- unsigned rm = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
- /* Treat this as unalloc to make sure we don't do it. */
- if (rn == rm)
- HALT_UNALLOC;
- offset <<= 3;
- if (wb != Post)
- address += offset;
- aarch64_set_reg_u64 (cpu, rm, SP_OK, aarch64_get_mem_u64 (cpu, address));
- aarch64_set_reg_u64 (cpu, rn, SP_OK, aarch64_get_mem_u64 (cpu, address + 8));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rd, SP_OK, address);
- }
- static void
- dex_load_store_pair_gr (sim_cpu *cpu)
- {
- /* instr[31,30] = size (10=> 64-bit, 01=> signed 32-bit, 00=> 32-bit)
- instr[29,25] = instruction encoding: 101_0
- instr[26] = V : 1 if fp 0 if gp
- instr[24,23] = addressing mode (10=> offset, 01=> post, 11=> pre)
- instr[22] = load/store (1=> load)
- instr[21,15] = signed, scaled, offset
- instr[14,10] = Rn
- instr[ 9, 5] = Rd
- instr[ 4, 0] = Rm. */
- uint32_t dispatch = ((INSTR (31, 30) << 3) | INSTR (24, 22));
- int32_t offset = simm32 (aarch64_get_instr (cpu), 21, 15);
- switch (dispatch)
- {
- case 2: store_pair_u32 (cpu, offset, Post); return;
- case 3: load_pair_u32 (cpu, offset, Post); return;
- case 4: store_pair_u32 (cpu, offset, NoWriteBack); return;
- case 5: load_pair_u32 (cpu, offset, NoWriteBack); return;
- case 6: store_pair_u32 (cpu, offset, Pre); return;
- case 7: load_pair_u32 (cpu, offset, Pre); return;
- case 11: load_pair_s32 (cpu, offset, Post); return;
- case 13: load_pair_s32 (cpu, offset, NoWriteBack); return;
- case 15: load_pair_s32 (cpu, offset, Pre); return;
- case 18: store_pair_u64 (cpu, offset, Post); return;
- case 19: load_pair_u64 (cpu, offset, Post); return;
- case 20: store_pair_u64 (cpu, offset, NoWriteBack); return;
- case 21: load_pair_u64 (cpu, offset, NoWriteBack); return;
- case 22: store_pair_u64 (cpu, offset, Pre); return;
- case 23: load_pair_u64 (cpu, offset, Pre); return;
- default:
- HALT_UNALLOC;
- }
- }
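- /* How the dispatch value above is put together, as an illustrative
- stand-alone sketch (hypothetical helper): size is instr[31,30],
- mode is instr[24,23] and load is instr[22]. A 64 bit LDP with
- pre-writeback has size == 2, mode == 3, load == 1, i.e. dispatch
- 23, matching the case above. */
- static uint32_t
- example_pair_dispatch (uint32_t size, uint32_t mode, uint32_t load)
- {
- return (size << 3) | (mode << 1) | load;
- }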
- static void
- store_pair_float (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (14, 10);
- unsigned rd = INSTR (9, 5);
- unsigned rm = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
- offset <<= 2;
- if (wb != Post)
- address += offset;
- aarch64_set_mem_u32 (cpu, address, aarch64_get_vec_u32 (cpu, rm, 0));
- aarch64_set_mem_u32 (cpu, address + 4, aarch64_get_vec_u32 (cpu, rn, 0));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rd, SP_OK, address);
- }
- static void
- store_pair_double (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (14, 10);
- unsigned rd = INSTR (9, 5);
- unsigned rm = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
- offset <<= 3;
- if (wb != Post)
- address += offset;
- aarch64_set_mem_u64 (cpu, address, aarch64_get_vec_u64 (cpu, rm, 0));
- aarch64_set_mem_u64 (cpu, address + 8, aarch64_get_vec_u64 (cpu, rn, 0));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rd, SP_OK, address);
- }
- static void
- store_pair_long_double (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- FRegister a;
- unsigned rn = INSTR (14, 10);
- unsigned rd = INSTR (9, 5);
- unsigned rm = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
- offset <<= 4;
- if (wb != Post)
- address += offset;
- aarch64_get_FP_long_double (cpu, rm, & a);
- aarch64_set_mem_long_double (cpu, address, a);
- aarch64_get_FP_long_double (cpu, rn, & a);
- aarch64_set_mem_long_double (cpu, address + 16, a);
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rd, SP_OK, address);
- }
- static void
- load_pair_float (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (14, 10);
- unsigned rd = INSTR (9, 5);
- unsigned rm = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
- if (rm == rn)
- HALT_UNALLOC;
- offset <<= 2;
- if (wb != Post)
- address += offset;
- aarch64_set_vec_u32 (cpu, rm, 0, aarch64_get_mem_u32 (cpu, address));
- aarch64_set_vec_u32 (cpu, rn, 0, aarch64_get_mem_u32 (cpu, address + 4));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rd, SP_OK, address);
- }
- static void
- load_pair_double (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- unsigned rn = INSTR (14, 10);
- unsigned rd = INSTR (9, 5);
- unsigned rm = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
- if (rm == rn)
- HALT_UNALLOC;
- offset <<= 3;
- if (wb != Post)
- address += offset;
- aarch64_set_vec_u64 (cpu, rm, 0, aarch64_get_mem_u64 (cpu, address));
- aarch64_set_vec_u64 (cpu, rn, 0, aarch64_get_mem_u64 (cpu, address + 8));
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rd, SP_OK, address);
- }
- static void
- load_pair_long_double (sim_cpu *cpu, int32_t offset, WriteBack wb)
- {
- FRegister a;
- unsigned rn = INSTR (14, 10);
- unsigned rd = INSTR (9, 5);
- unsigned rm = INSTR (4, 0);
- uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
- if (rm == rn)
- HALT_UNALLOC;
- offset <<= 4;
- if (wb != Post)
- address += offset;
- aarch64_get_mem_long_double (cpu, address, & a);
- aarch64_set_FP_long_double (cpu, rm, a);
- aarch64_get_mem_long_double (cpu, address + 16, & a);
- aarch64_set_FP_long_double (cpu, rn, a);
- if (wb == Post)
- address += offset;
- if (wb != NoWriteBack)
- aarch64_set_reg_u64 (cpu, rd, SP_OK, address);
- }
- static void
- dex_load_store_pair_fp (sim_cpu *cpu)
- {
- /* instr[31,30] = size (10=> 128-bit, 01=> 64-bit, 00=> 32-bit)
- instr[29,25] = instruction encoding
- instr[24,23] = addressing mode (10=> offset, 01=> post, 11=> pre)
- instr[22] = load/store (1=> load)
- instr[21,15] = signed, scaled, offset
- instr[14,10] = Rn
- instr[ 9, 5] = Rd
- instr[ 4, 0] = Rm */
- uint32_t dispatch = ((INSTR (31, 30) << 3) | INSTR (24, 22));
- int32_t offset = simm32 (aarch64_get_instr (cpu), 21, 15);
- switch (dispatch)
- {
- case 2: store_pair_float (cpu, offset, Post); return;
- case 3: load_pair_float (cpu, offset, Post); return;
- case 4: store_pair_float (cpu, offset, NoWriteBack); return;
- case 5: load_pair_float (cpu, offset, NoWriteBack); return;
- case 6: store_pair_float (cpu, offset, Pre); return;
- case 7: load_pair_float (cpu, offset, Pre); return;
- case 10: store_pair_double (cpu, offset, Post); return;
- case 11: load_pair_double (cpu, offset, Post); return;
- case 12: store_pair_double (cpu, offset, NoWriteBack); return;
- case 13: load_pair_double (cpu, offset, NoWriteBack); return;
- case 14: store_pair_double (cpu, offset, Pre); return;
- case 15: load_pair_double (cpu, offset, Pre); return;
- case 18: store_pair_long_double (cpu, offset, Post); return;
- case 19: load_pair_long_double (cpu, offset, Post); return;
- case 20: store_pair_long_double (cpu, offset, NoWriteBack); return;
- case 21: load_pair_long_double (cpu, offset, NoWriteBack); return;
- case 22: store_pair_long_double (cpu, offset, Pre); return;
- case 23: load_pair_long_double (cpu, offset, Pre); return;
- default:
- HALT_UNALLOC;
- }
- }
- static inline unsigned
- vec_reg (unsigned v, unsigned o)
- {
- return (v + o) & 0x1F; /* Wrap within the 32 vector registers. */
- }
- /* Load multiple N-element structures to M consecutive registers. */
- static void
- vec_load (sim_cpu *cpu, uint64_t address, unsigned N, unsigned M)
- {
- int all = INSTR (30, 30);
- unsigned size = INSTR (11, 10);
- unsigned vd = INSTR (4, 0);
- unsigned rpt = (N == M) ? 1 : M;
- unsigned selem = N;
- unsigned i, j, k;
- switch (size)
- {
- case 0: /* 8-bit operations. */
- for (i = 0; i < rpt; i++)
- for (j = 0; j < (8 + (8 * all)); j++)
- for (k = 0; k < selem; k++)
- {
- aarch64_set_vec_u8 (cpu, vec_reg (vd, i + k), j,
- aarch64_get_mem_u8 (cpu, address));
- address += 1;
- }
- return;
- case 1: /* 16-bit operations. */
- for (i = 0; i < rpt; i++)
- for (j = 0; j < (4 + (4 * all)); j++)
- for (k = 0; k < selem; k++)
- {
- aarch64_set_vec_u16 (cpu, vec_reg (vd, i + k), j,
- aarch64_get_mem_u16 (cpu, address));
- address += 2;
- }
- return;
- case 2: /* 32-bit operations. */
- for (i = 0; i < rpt; i++)
- for (j = 0; j < (2 + (2 * all)); j++)
- for (k = 0; k < selem; k++)
- {
- aarch64_set_vec_u32 (cpu, vec_reg (vd, i + k), j,
- aarch64_get_mem_u32 (cpu, address));
- address += 4;
- }
- return;
- case 3: /* 64-bit operations. */
- for (i = 0; i < rpt; i++)
- for (j = 0; j < (1 + all); j++)
- for (k = 0; k < selem; k++)
- {
- aarch64_set_vec_u64 (cpu, vec_reg (vd, i + k), j,
- aarch64_get_mem_u64 (cpu, address));
- address += 8;
- }
- return;
- }
- }
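- /* Worked example: for LD2 {V0.16B, V1.16B} (all = 1, size = 0) rpt
- is 1 and selem is 2, so successive bytes from memory are
- de-interleaved into lane j of V0 and then V1, for j = 0..15. */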
- /* Load multiple 4-element structures into four consecutive registers. */
- static void
- LD4 (sim_cpu *cpu, uint64_t address)
- {
- vec_load (cpu, address, 4, 4);
- }
- /* Load multiple 3-element structures into three consecutive registers. */
- static void
- LD3 (sim_cpu *cpu, uint64_t address)
- {
- vec_load (cpu, address, 3, 3);
- }
- /* Load multiple 2-element structures into two consecutive registers. */
- static void
- LD2 (sim_cpu *cpu, uint64_t address)
- {
- vec_load (cpu, address, 2, 2);
- }
- /* Load multiple 1-element structures into one register. */
- static void
- LD1_1 (sim_cpu *cpu, uint64_t address)
- {
- vec_load (cpu, address, 1, 1);
- }
- /* Load multiple 1-element structures into two registers. */
- static void
- LD1_2 (sim_cpu *cpu, uint64_t address)
- {
- vec_load (cpu, address, 1, 2);
- }
- /* Load multiple 1-element structures into three registers. */
- static void
- LD1_3 (sim_cpu *cpu, uint64_t address)
- {
- vec_load (cpu, address, 1, 3);
- }
- /* Load multiple 1-element structures into four registers. */
- static void
- LD1_4 (sim_cpu *cpu, uint64_t address)
- {
- vec_load (cpu, address, 1, 4);
- }
- /* Store multiple N-element structures from M consecutive registers. */
- static void
- vec_store (sim_cpu *cpu, uint64_t address, unsigned N, unsigned M)
- {
- int all = INSTR (30, 30);
- unsigned size = INSTR (11, 10);
- unsigned vd = INSTR (4, 0);
- unsigned rpt = (N == M) ? 1 : M;
- unsigned selem = N;
- unsigned i, j, k;
- switch (size)
- {
- case 0: /* 8-bit operations. */
- for (i = 0; i < rpt; i++)
- for (j = 0; j < (8 + (8 * all)); j++)
- for (k = 0; k < selem; k++)
- {
- aarch64_set_mem_u8
- (cpu, address,
- aarch64_get_vec_u8 (cpu, vec_reg (vd, i + k), j));
- address += 1;
- }
- return;
- case 1: /* 16-bit operations. */
- for (i = 0; i < rpt; i++)
- for (j = 0; j < (4 + (4 * all)); j++)
- for (k = 0; k < selem; k++)
- {
- aarch64_set_mem_u16
- (cpu, address,
- aarch64_get_vec_u16 (cpu, vec_reg (vd, i + k), j));
- address += 2;
- }
- return;
- case 2: /* 32-bit operations. */
- for (i = 0; i < rpt; i++)
- for (j = 0; j < (2 + (2 * all)); j++)
- for (k = 0; k < selem; k++)
- {
- aarch64_set_mem_u32
- (cpu, address,
- aarch64_get_vec_u32 (cpu, vec_reg (vd, i + k), j));
- address += 4;
- }
- return;
- case 3: /* 64-bit operations. */
- for (i = 0; i < rpt; i++)
- for (j = 0; j < (1 + all); j++)
- for (k = 0; k < selem; k++)
- {
- aarch64_set_mem_u64
- (cpu, address,
- aarch64_get_vec_u64 (cpu, vec_reg (vd, i + k), j));
- address += 8;
- }
- return;
- }
- }
- /* Store multiple 4-element structure from four consecutive registers. */
- static void
- ST4 (sim_cpu *cpu, uint64_t address)
- {
- vec_store (cpu, address, 4, 4);
- }
- /* Store multiple 3-element structures from three consecutive registers. */
- static void
- ST3 (sim_cpu *cpu, uint64_t address)
- {
- vec_store (cpu, address, 3, 3);
- }
- /* Store multiple 2-element structures from two consecutive registers. */
- static void
- ST2 (sim_cpu *cpu, uint64_t address)
- {
- vec_store (cpu, address, 2, 2);
- }
- /* Store multiple 1-element structures from one register. */
- static void
- ST1_1 (sim_cpu *cpu, uint64_t address)
- {
- vec_store (cpu, address, 1, 1);
- }
- /* Store multiple 1-element structures from two registers. */
- static void
- ST1_2 (sim_cpu *cpu, uint64_t address)
- {
- vec_store (cpu, address, 1, 2);
- }
- /* Store multiple 1-element structures from three registers. */
- static void
- ST1_3 (sim_cpu *cpu, uint64_t address)
- {
- vec_store (cpu, address, 1, 3);
- }
- /* Store multiple 1-element structures from four registers. */
- static void
- ST1_4 (sim_cpu *cpu, uint64_t address)
- {
- vec_store (cpu, address, 1, 4);
- }
- #define LDn_STn_SINGLE_LANE_AND_SIZE() \
- do \
- { \
- switch (INSTR (15, 14)) \
- { \
- case 0: \
- lane = (full << 3) | (s << 2) | size; \
- size = 0; \
- break; \
- \
- case 1: \
- if ((size & 1) == 1) \
- HALT_UNALLOC; \
- lane = (full << 2) | (s << 1) | (size >> 1); \
- size = 1; \
- break; \
- \
- case 2: \
- if ((size & 2) == 2) \
- HALT_UNALLOC; \
- \
- if ((size & 1) == 0) \
- { \
- lane = (full << 1) | s; \
- size = 2; \
- } \
- else \
- { \
- if (s) \
- HALT_UNALLOC; \
- lane = full; \
- size = 3; \
- } \
- break; \
- \
- default: \
- HALT_UNALLOC; \
- } \
- } \
- while (0)
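- /* Worked example: for a byte access with Q (full) = 1, S = 0 and
- size = 10, opcode bits [15,14] are 0, so the macro computes
- lane = (1 << 3) | (0 << 2) | 2 = 10 and resets size to 0, i.e. a
- single byte is transferred to or from lane 10. */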
- /* Load single structure into one lane of N registers. */
- static void
- do_vec_LDn_single (sim_cpu *cpu, uint64_t address)
- {
- /* instr[31] = 0
- instr[30] = element selector 0=>half, 1=>all elements
- instr[29,24] = 00 1101
- instr[23] = 0=>simple, 1=>post
- instr[22] = 1
- instr[21] = width: LD1-or-LD3 (0) / LD2-or-LD4 (1)
- instr[20,16] = 0 0000 (simple), Vinc (reg-post-inc, no SP),
- 11111 (immediate post inc)
- instr[15,13] = opcode
- instr[12] = S, used for lane number
- instr[11,10] = size, also used for lane number
- instr[9,5] = address
- instr[4,0] = Vd */
- unsigned full = INSTR (30, 30);
- unsigned vd = INSTR (4, 0);
- unsigned size = INSTR (11, 10);
- unsigned s = INSTR (12, 12);
- int nregs = ((INSTR (13, 13) << 1) | INSTR (21, 21)) + 1;
- int lane = 0;
- int i;
- NYI_assert (29, 24, 0x0D);
- NYI_assert (22, 22, 1);
- /* Compute the lane number first (using size), and then compute size. */
- LDn_STn_SINGLE_LANE_AND_SIZE ();
- for (i = 0; i < nregs; i++)
- switch (size)
- {
- case 0:
- {
- uint8_t val = aarch64_get_mem_u8 (cpu, address + i);
- aarch64_set_vec_u8 (cpu, vd + i, lane, val);
- break;
- }
- case 1:
- {
- uint16_t val = aarch64_get_mem_u16 (cpu, address + (i * 2));
- aarch64_set_vec_u16 (cpu, vd + i, lane, val);
- break;
- }
- case 2:
- {
- uint32_t val = aarch64_get_mem_u32 (cpu, address + (i * 4));
- aarch64_set_vec_u32 (cpu, vd + i, lane, val);
- break;
- }
- case 3:
- {
- uint64_t val = aarch64_get_mem_u64 (cpu, address + (i * 8));
- aarch64_set_vec_u64 (cpu, vd + i, lane, val);
- break;
- }
- }
- }
- /* Store single structure from one lane from N registers. */
- static void
- do_vec_STn_single (sim_cpu *cpu, uint64_t address)
- {
- /* instr[31] = 0
- instr[30] = element selector 0=>half, 1=>all elements
- instr[29,24] = 00 1101
- instr[23] = 0=>simple, 1=>post
- instr[22] = 0
- instr[21] = width: LD1-or-LD3 (0) / LD2-or-LD4 (1)
- instr[20,16] = 0 0000 (simple), Vinc (reg-post-inc, no SP),
- 11111 (immediate post inc)
- instr[15,13] = opcode
- instr[12] = S, used for lane number
- instr[11,10] = size, also used for lane number
- instr[9,5] = address
- instr[4,0] = Vd */
- unsigned full = INSTR (30, 30);
- unsigned vd = INSTR (4, 0);
- unsigned size = INSTR (11, 10);
- unsigned s = INSTR (12, 12);
- int nregs = ((INSTR (13, 13) << 1) | INSTR (21, 21)) + 1;
- int lane = 0;
- int i;
- NYI_assert (29, 24, 0x0D);
- NYI_assert (22, 22, 0);
- /* Compute the lane number first (using size), and then compute size. */
- LDn_STn_SINGLE_LANE_AND_SIZE ();
- for (i = 0; i < nregs; i++)
- switch (size)
- {
- case 0:
- {
- uint8_t val = aarch64_get_vec_u8 (cpu, vd + i, lane);
- aarch64_set_mem_u8 (cpu, address + i, val);
- break;
- }
- case 1:
- {
- uint16_t val = aarch64_get_vec_u16 (cpu, vd + i, lane);
- aarch64_set_mem_u16 (cpu, address + (i * 2), val);
- break;
- }
- case 2:
- {
- uint32_t val = aarch64_get_vec_u32 (cpu, vd + i, lane);
- aarch64_set_mem_u32 (cpu, address + (i * 4), val);
- break;
- }
- case 3:
- {
- uint64_t val = aarch64_get_vec_u64 (cpu, vd + i, lane);
- aarch64_set_mem_u64 (cpu, address + (i * 8), val);
- break;
- }
- }
- }
- /* Load single structure into all lanes of N registers. */
- static void
- do_vec_LDnR (sim_cpu *cpu, uint64_t address)
- {
- /* instr[31] = 0
- instr[30] = element selector 0=>half, 1=>all elements
- instr[29,24] = 00 1101
- instr[23] = 0=>simple, 1=>post
- instr[22] = 1
- instr[21] = width: LD1R-or-LD3R (0) / LD2R-or-LD4R (1)
- instr[20,16] = 0 0000 (simple), Vinc (reg-post-inc, no SP),
- 11111 (immediate post inc)
- instr[15,14] = 11
- instr[13] = width: LD1R-or-LD2R (0) / LD3R-or-LD4R (1)
- instr[12] = 0
- instr[11,10] = element size 00=> byte(b), 01=> half(h),
- 10=> word(s), 11=> double(d)
- instr[9,5] = address
- instr[4,0] = Vd */
- unsigned full = INSTR (30, 30);
- unsigned vd = INSTR (4, 0);
- unsigned size = INSTR (11, 10);
- int nregs = ((INSTR (13, 13) << 1) | INSTR (21, 21)) + 1;
- int i, n;
- NYI_assert (29, 24, 0x0D);
- NYI_assert (22, 22, 1);
- NYI_assert (15, 14, 3);
- NYI_assert (12, 12, 0);
- for (n = 0; n < nregs; n++)
- switch (size)
- {
- case 0:
- {
- uint8_t val = aarch64_get_mem_u8 (cpu, address + n);
- for (i = 0; i < (full ? 16 : 8); i++)
- aarch64_set_vec_u8 (cpu, vd + n, i, val);
- break;
- }
- case 1:
- {
- uint16_t val = aarch64_get_mem_u16 (cpu, address + (n * 2));
- for (i = 0; i < (full ? 8 : 4); i++)
- aarch64_set_vec_u16 (cpu, vd + n, i, val);
- break;
- }
- case 2:
- {
- uint32_t val = aarch64_get_mem_u32 (cpu, address + (n * 4));
- for (i = 0; i < (full ? 4 : 2); i++)
- aarch64_set_vec_u32 (cpu, vd + n, i, val);
- break;
- }
- case 3:
- {
- uint64_t val = aarch64_get_mem_u64 (cpu, address + (n * 8));
- for (i = 0; i < (full ? 2 : 1); i++)
- aarch64_set_vec_u64 (cpu, vd + n, i, val);
- break;
- }
- default:
- HALT_UNALLOC;
- }
- }
- static void
- do_vec_load_store (sim_cpu *cpu)
- {
- /* {LD|ST}<N> {Vd..Vd+N}, vaddr
- instr[31] = 0
- instr[30] = element selector 0=>half, 1=>all elements
- instr[29,25] = 00110
- instr[24] = 0=>multiple struct, 1=>single struct
- instr[23] = 0=>simple, 1=>post
- instr[22] = 0=>store, 1=>load
- instr[21] = 0 (LDn) / small(0)-large(1) selector (LDnR)
- instr[20,16] = 00000 (simple), Vinc (reg-post-inc, no SP),
- 11111 (immediate post inc)
- instr[15,12] = elements and destinations. eg for load:
- 0000=>LD4 => load multiple 4-element to
- four consecutive registers
- 0100=>LD3 => load multiple 3-element to
- three consecutive registers
- 1000=>LD2 => load multiple 2-element to
- two consecutive registers
- 0010=>LD1 => load multiple 1-element to
- four consecutive registers
- 0110=>LD1 => load multiple 1-element to
- three consecutive registers
- 1010=>LD1 => load multiple 1-element to
- two consecutive registers
- 0111=>LD1 => load multiple 1-element to
- one register
- 1100=>LD1R,LD2R
- 1110=>LD3R,LD4R
- instr[11,10] = element size 00=> byte(b), 01=> half(h),
- 10=> word(s), 11=> double(d)
- instr[9,5] = Vn, can be SP
- instr[4,0] = Vd */
- int single;
- int post;
- int load;
- unsigned vn;
- uint64_t address;
- int type;
- if (INSTR (31, 31) != 0 || INSTR (29, 25) != 0x06)
- HALT_NYI;
- single = INSTR (24, 24);
- post = INSTR (23, 23);
- load = INSTR (22, 22);
- type = INSTR (15, 12);
- vn = INSTR (9, 5);
- address = aarch64_get_reg_u64 (cpu, vn, SP_OK);
- if (! single && INSTR (21, 21) != 0)
- HALT_UNALLOC;
- if (post)
- {
- unsigned vm = INSTR (20, 16);
- if (vm == R31)
- {
- unsigned sizeof_operation;
- if (single)
- {
- if ((type >= 0) && (type <= 11))
- {
- int nregs = ((INSTR (13, 13) << 1) | INSTR (21, 21)) + 1;
- switch (INSTR (15, 14))
- {
- case 0:
- sizeof_operation = nregs * 1;
- break;
- case 1:
- sizeof_operation = nregs * 2;
- break;
- case 2:
- if (INSTR (10, 10) == 0)
- sizeof_operation = nregs * 4;
- else
- sizeof_operation = nregs * 8;
- break;
- default:
- HALT_UNALLOC;
- }
- }
- else if (type == 0xC)
- {
- sizeof_operation = INSTR (21, 21) ? 2 : 1;
- sizeof_operation <<= INSTR (11, 10);
- }
- else if (type == 0xE)
- {
- sizeof_operation = INSTR (21, 21) ? 4 : 3;
- sizeof_operation <<= INSTR (11, 10);
- }
- else
- HALT_UNALLOC;
- }
- else
- {
- switch (type)
- {
- case 0: sizeof_operation = 32; break;
- case 4: sizeof_operation = 24; break;
- case 8: sizeof_operation = 16; break;
- case 7:
- /* One register, immediate offset variant. */
- sizeof_operation = 8;
- break;
- case 10:
- /* Two registers, immediate offset variant. */
- sizeof_operation = 16;
- break;
- case 6:
- /* Three registers, immediate offset variant. */
- sizeof_operation = 24;
- break;
- case 2:
- /* Four registers, immediate offset variant. */
- sizeof_operation = 32;
- break;
- default:
- HALT_UNALLOC;
- }
- if (INSTR (30, 30))
- sizeof_operation *= 2;
- }
- aarch64_set_reg_u64 (cpu, vn, SP_OK, address + sizeof_operation);
- }
- else
- aarch64_set_reg_u64 (cpu, vn, SP_OK,
- address + aarch64_get_reg_u64 (cpu, vm, NO_SP));
- }
- else
- {
- NYI_assert (20, 16, 0);
- }
- if (single)
- {
- if (load)
- {
- if ((type >= 0) && (type <= 11))
- do_vec_LDn_single (cpu, address);
- else if ((type == 0xC) || (type == 0xE))
- do_vec_LDnR (cpu, address);
- else
- HALT_UNALLOC;
- return;
- }
- /* Stores. */
- if ((type >= 0) && (type <= 11))
- {
- do_vec_STn_single (cpu, address);
- return;
- }
- HALT_UNALLOC;
- }
- if (load)
- {
- switch (type)
- {
- case 0: LD4 (cpu, address); return;
- case 4: LD3 (cpu, address); return;
- case 8: LD2 (cpu, address); return;
- case 2: LD1_4 (cpu, address); return;
- case 6: LD1_3 (cpu, address); return;
- case 10: LD1_2 (cpu, address); return;
- case 7: LD1_1 (cpu, address); return;
- default:
- HALT_UNALLOC;
- }
- }
- /* Stores. */
- switch (type)
- {
- case 0: ST4 (cpu, address); return;
- case 4: ST3 (cpu, address); return;
- case 8: ST2 (cpu, address); return;
- case 2: ST1_4 (cpu, address); return;
- case 6: ST1_3 (cpu, address); return;
- case 10: ST1_2 (cpu, address); return;
- case 7: ST1_1 (cpu, address); return;
- default:
- HALT_UNALLOC;
- }
- }
- static void
- dexLdSt (sim_cpu *cpu)
- {
- /* uint32_t group = dispatchGroup (aarch64_get_instr (cpu));
- assert group == GROUP_LDST_0100 || group == GROUP_LDST_0110 ||
- group == GROUP_LDST_1100 || group == GROUP_LDST_1110
- bits [29,28:26] of a LS are the secondary dispatch vector. */
- uint32_t group2 = dispatchLS (aarch64_get_instr (cpu));
- switch (group2)
- {
- case LS_EXCL_000:
- dexLoadExclusive (cpu); return;
- case LS_LIT_010:
- case LS_LIT_011:
- dexLoadLiteral (cpu); return;
- case LS_OTHER_110:
- case LS_OTHER_111:
- dexLoadOther (cpu); return;
- case LS_ADVSIMD_001:
- do_vec_load_store (cpu); return;
- case LS_PAIR_100:
- dex_load_store_pair_gr (cpu); return;
- case LS_PAIR_101:
- dex_load_store_pair_fp (cpu); return;
- default:
- /* Should never reach here. */
- HALT_NYI;
- }
- }
- /* Specific decode and execute for group Data Processing Register. */
- static void
- dexLogicalShiftedRegister (sim_cpu *cpu)
- {
- /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30,29] = op
- instr[28:24] = 01010
- instr[23,22] = shift : 0 ==> LSL, 1 ==> LSR, 2 ==> ASR, 3 ==> ROR
- instr[21] = N
- instr[20,16] = Rm
- instr[15,10] = count : must be 0xxxxx for 32 bit
- instr[9,5] = Rn
- instr[4,0] = Rd */
- uint32_t size = INSTR (31, 31);
- Shift shiftType = INSTR (23, 22);
- uint32_t count = INSTR (15, 10);
- /* 32 bit operations must have count[5] == 0,
- otherwise we have an UNALLOC. */
- if (size == 0 && uimm (count, 5, 5))
- HALT_UNALLOC;
- /* Dispatch on size:op:N. */
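- /* e.g. a 64 bit ANDS has size = 1, op = 11 and N = 0, giving a
- dispatch value of 0b1110 == 14, which selects ands64_shift. */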
- switch ((INSTR (31, 29) << 1) | INSTR (21, 21))
- {
- case 0: and32_shift (cpu, shiftType, count); return;
- case 1: bic32_shift (cpu, shiftType, count); return;
- case 2: orr32_shift (cpu, shiftType, count); return;
- case 3: orn32_shift (cpu, shiftType, count); return;
- case 4: eor32_shift (cpu, shiftType, count); return;
- case 5: eon32_shift (cpu, shiftType, count); return;
- case 6: ands32_shift (cpu, shiftType, count); return;
- case 7: bics32_shift (cpu, shiftType, count); return;
- case 8: and64_shift (cpu, shiftType, count); return;
- case 9: bic64_shift (cpu, shiftType, count); return;
- case 10: orr64_shift (cpu, shiftType, count); return;
- case 11: orn64_shift (cpu, shiftType, count); return;
- case 12: eor64_shift (cpu, shiftType, count); return;
- case 13: eon64_shift (cpu, shiftType, count); return;
- case 14: ands64_shift (cpu, shiftType, count); return;
- case 15: bics64_shift (cpu, shiftType, count); return;
- }
- }
- /* 32 bit conditional select. */
- static void
- csel32 (sim_cpu *cpu, CondCode cc)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- testConditionCode (cpu, cc)
- ? aarch64_get_reg_u32 (cpu, rn, NO_SP)
- : aarch64_get_reg_u32 (cpu, rm, NO_SP));
- }
- /* 64 bit conditional select. */
- static void
- csel64 (sim_cpu *cpu, CondCode cc)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- testConditionCode (cpu, cc)
- ? aarch64_get_reg_u64 (cpu, rn, NO_SP)
- : aarch64_get_reg_u64 (cpu, rm, NO_SP));
- }
- /* 32 bit conditional increment. */
- static void
- csinc32 (sim_cpu *cpu, CondCode cc)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- testConditionCode (cpu, cc)
- ? aarch64_get_reg_u32 (cpu, rn, NO_SP)
- : aarch64_get_reg_u32 (cpu, rm, NO_SP) + 1);
- }
- /* 64 bit conditional increment. */
- static void
- csinc64 (sim_cpu *cpu, CondCode cc)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- testConditionCode (cpu, cc)
- ? aarch64_get_reg_u64 (cpu, rn, NO_SP)
- : aarch64_get_reg_u64 (cpu, rm, NO_SP) + 1);
- }
- /* 32 bit conditional invert. */
- static void
- csinv32 (sim_cpu *cpu, CondCode cc)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- testConditionCode (cpu, cc)
- ? aarch64_get_reg_u32 (cpu, rn, NO_SP)
- : ~ aarch64_get_reg_u32 (cpu, rm, NO_SP));
- }
- /* 64 bit conditional invert. */
- static void
- csinv64 (sim_cpu *cpu, CondCode cc)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- testConditionCode (cpu, cc)
- ? aarch64_get_reg_u64 (cpu, rn, NO_SP)
- : ~ aarch64_get_reg_u64 (cpu, rm, NO_SP));
- }
- /* 32 bit conditional negate. */
- static void
- csneg32 (sim_cpu *cpu, CondCode cc)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- testConditionCode (cpu, cc)
- ? aarch64_get_reg_u32 (cpu, rn, NO_SP)
- : - aarch64_get_reg_u32 (cpu, rm, NO_SP));
- }
- /* 64 bit conditional negate. */
- static void
- csneg64 (sim_cpu *cpu, CondCode cc)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- testConditionCode (cpu, cc)
- ? aarch64_get_reg_u64 (cpu, rn, NO_SP)
- : - aarch64_get_reg_u64 (cpu, rm, NO_SP));
- }
- static void
- dexCondSelect (sim_cpu *cpu)
- {
- /* instr[28,21] = 1101 0100
- instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30] = op : 0 ==> CSEL/CSINC, 1 ==> CSINV/CSNEG
- instr[29] = S : 0 ==> ok, 1 ==> UNALLOC
- instr[15,12] = cond
- instr[11,10] = op2 : 00 ==> CSEL/CSINV, 01 ==> CSINC/CSNEG,
- 1x ==> UNALLOC */
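- /* N.B. the aliases CSET, CSETM, CINC, CINV and CNEG are encoded as
- CSINC, CSINV and CSNEG with the zero register and/or an inverted
- condition, so they are all handled by this dispatch too. */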
- CondCode cc = INSTR (15, 12);
- uint32_t S = INSTR (29, 29);
- uint32_t op2 = INSTR (11, 10);
- if (S == 1)
- HALT_UNALLOC;
- if (op2 & 0x2)
- HALT_UNALLOC;
- switch ((INSTR (31, 30) << 1) | op2)
- {
- case 0: csel32 (cpu, cc); return;
- case 1: csinc32 (cpu, cc); return;
- case 2: csinv32 (cpu, cc); return;
- case 3: csneg32 (cpu, cc); return;
- case 4: csel64 (cpu, cc); return;
- case 5: csinc64 (cpu, cc); return;
- case 6: csinv64 (cpu, cc); return;
- case 7: csneg64 (cpu, cc); return;
- }
- }
- /* Some helpers for counting leading 1 or 0 bits. */
- /* Counts the number of leading bits which are the same
- in a 32 bit value, returning a count in the range 1 to 32. */
- static uint32_t
- leading32 (uint32_t value)
- {
- int32_t mask = 0xffff0000;
- uint32_t count = 16; /* Counts number of bits set in mask. */
- uint32_t lo = 1; /* Lower bound for number of sign bits. */
- uint32_t hi = 32; /* Upper bound for number of sign bits. */
- while (lo + 1 < hi)
- {
- int32_t test = (value & mask);
- if (test == 0 || test == mask)
- {
- lo = count;
- count = (lo + hi) / 2;
- mask >>= (count - lo);
- }
- else
- {
- hi = count;
- count = (lo + hi) / 2;
- mask <<= hi - count;
- }
- }
- if (lo != hi)
- {
- int32_t test;
- mask >>= 1;
- test = (value & mask);
- if (test == 0 || test == mask)
- count = hi;
- else
- count = lo;
- }
- return count;
- }
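- /* e.g. leading32 (0x00000001) is 31 (31 leading zeros) and
- leading32 (0xFFFFFFF0) is 28 (28 leading ones); cls32 below then
- subtracts one, since the leading bit itself is not counted. */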
- /* Counts the number of leading bits which are the same
- in a 64 bit value, returning a count in the range 1 to 64. */
- static uint64_t
- leading64 (uint64_t value)
- {
- int64_t mask = 0xffffffff00000000LL;
- uint64_t count = 32; /* Counts number of bits set in mask. */
- uint64_t lo = 1; /* Lower bound for number of sign bits. */
- uint64_t hi = 64; /* Upper bound for number of sign bits. */
- while (lo + 1 < hi)
- {
- int64_t test = (value & mask);
- if (test == 0 || test == mask)
- {
- lo = count;
- count = (lo + hi) / 2;
- mask >>= (count - lo);
- }
- else
- {
- hi = count;
- count = (lo + hi) / 2;
- mask <<= hi - count;
- }
- }
- if (lo != hi)
- {
- int64_t test;
- mask >>= 1;
- test = (value & mask);
- if (test == 0 || test == mask)
- count = hi;
- else
- count = lo;
- }
- return count;
- }
- /* Bit operations. */
- /* N.B register args may not be SP. */
- /* 32 bit count leading sign bits. */
- static void
- cls32 (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- /* N.B. the result needs to exclude the leading bit. */
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, leading32 (aarch64_get_reg_u32 (cpu, rn, NO_SP)) - 1);
- }
- /* 64 bit count leading sign bits. */
- static void
- cls64 (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- /* N.B. the result needs to exclude the leading bit. */
- aarch64_set_reg_u64
- (cpu, rd, NO_SP, leading64 (aarch64_get_reg_u64 (cpu, rn, NO_SP)) - 1);
- }
- /* 32 bit count leading zero bits. */
- static void
- clz32 (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- /* if the sign (top) bit is set then the count is 0. */
- if (pick32 (value, 31, 31))
- aarch64_set_reg_u64 (cpu, rd, NO_SP, 0L);
- else
- aarch64_set_reg_u64 (cpu, rd, NO_SP, leading32 (value));
- }
- /* 64 bit count leading zero bits. */
- static void
- clz64 (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- /* if the sign (top) bit is set then the count is 0. */
- if (pick64 (value, 63, 63))
- aarch64_set_reg_u64 (cpu, rd, NO_SP, 0L);
- else
- aarch64_set_reg_u64 (cpu, rd, NO_SP, leading64 (value));
- }
- /* 32 bit reverse bits. */
- static void
- rbit32 (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- uint32_t result = 0;
- int i;
- for (i = 0; i < 32; i++)
- {
- result <<= 1;
- result |= (value & 1);
- value >>= 1;
- }
- aarch64_set_reg_u64 (cpu, rd, NO_SP, result);
- }
- /* 64 bit reverse bits. */
- static void
- rbit64 (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- uint64_t result = 0;
- int i;
- for (i = 0; i < 64; i++)
- {
- result <<= 1;
- result |= (value & 1UL);
- value >>= 1;
- }
- aarch64_set_reg_u64 (cpu, rd, NO_SP, result);
- }
- /* 32 bit reverse bytes. */
- static void
- rev32 (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- uint32_t result = 0;
- int i;
- for (i = 0; i < 4; i++)
- {
- result <<= 8;
- result |= (value & 0xff);
- value >>= 8;
- }
- aarch64_set_reg_u64 (cpu, rd, NO_SP, result);
- }
- /* 64 bit reverse bytes. */
- static void
- rev64 (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- uint64_t result = 0;
- int i;
- for (i = 0; i < 8; i++)
- {
- result <<= 8;
- result |= (value & 0xffULL);
- value >>= 8;
- }
- aarch64_set_reg_u64 (cpu, rd, NO_SP, result);
- }
- /* 32 bit reverse shorts. */
- /* N.B. this reverses the order of the bytes in each half word. */
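- /* e.g. 0xAABBCCDD becomes 0xBBAADDCC. */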
- static void
- revh32 (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- uint32_t result = 0;
- int i;
- for (i = 0; i < 2; i++)
- {
- result <<= 8;
- result |= (value & 0x00ff00ff);
- value >>= 8;
- }
- aarch64_set_reg_u64 (cpu, rd, NO_SP, result);
- }
- /* 64 bit reverse shorts. */
- /* N.B. this reverses the order of the bytes in each half word. */
- static void
- revh64 (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- uint64_t result = 0;
- int i;
- for (i = 0; i < 2; i++)
- {
- result <<= 8;
- result |= (value & 0x00ff00ff00ff00ffULL);
- value >>= 8;
- }
- aarch64_set_reg_u64 (cpu, rd, NO_SP, result);
- }
- static void
- dexDataProc1Source (sim_cpu *cpu)
- {
- /* instr[30] = 1
- instr[28,21] = 1101 0110
- instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[29] = S : 0 ==> ok, 1 ==> UNALLOC
- instr[20,16] = opcode2 : 00000 ==> ok, ow ==> UNALLOC
- instr[15,10] = opcode : 000000 ==> RBIT, 000001 ==> REV16,
- 000010 ==> REV, 000011 ==> UNALLOC
- 000100 ==> CLZ, 000101 ==> CLS
- ow ==> UNALLOC
- instr[9,5] = rn : may not be SP
- instr[4,0] = rd : may not be SP. */
- uint32_t S = INSTR (29, 29);
- uint32_t opcode2 = INSTR (20, 16);
- uint32_t opcode = INSTR (15, 10);
- uint32_t dispatch = ((INSTR (31, 31) << 3) | opcode);
- if (S == 1)
- HALT_UNALLOC;
- if (opcode2 != 0)
- HALT_UNALLOC;
- if (opcode & 0x38)
- HALT_UNALLOC;
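- /* e.g. a 64 bit CLZ (opcode 000100) gives dispatch
- (1 << 3) | 4 == 12, which selects clz64. */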
- switch (dispatch)
- {
- case 0: rbit32 (cpu); return;
- case 1: revh32 (cpu); return;
- case 2: rev32 (cpu); return;
- case 4: clz32 (cpu); return;
- case 5: cls32 (cpu); return;
- case 8: rbit64 (cpu); return;
- case 9: revh64 (cpu); return;
- case 10: rev32 (cpu); return;
- case 11: rev64 (cpu); return;
- case 12: clz64 (cpu); return;
- case 13: cls64 (cpu); return;
- default: HALT_UNALLOC;
- }
- }
- /* Variable shift.
- Shifts by count supplied in register.
- N.B register args may not be SP.
- These all use the shifted auxiliary function for
- simplicity and clarity. Writing the actual shift
- inline would avoid a branch and so be faster but
- would also necessitate getting signs right. */
- /* 32 bit arithmetic shift right. */
- static void
- asrv32 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP,
- shifted32 (aarch64_get_reg_u32 (cpu, rn, NO_SP), ASR,
- (aarch64_get_reg_u32 (cpu, rm, NO_SP) & 0x1f)));
- }
- /* 64 bit arithmetic shift right. */
- static void
- asrv64 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP,
- shifted64 (aarch64_get_reg_u64 (cpu, rn, NO_SP), ASR,
- (aarch64_get_reg_u64 (cpu, rm, NO_SP) & 0x3f)));
- }
- /* 32 bit logical shift left. */
- static void
- lslv32 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP,
- shifted32 (aarch64_get_reg_u32 (cpu, rn, NO_SP), LSL,
- (aarch64_get_reg_u32 (cpu, rm, NO_SP) & 0x1f)));
- }
- /* 64 bit logical shift left. */
- static void
- lslv64 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP,
- shifted64 (aarch64_get_reg_u64 (cpu, rn, NO_SP), LSL,
- (aarch64_get_reg_u64 (cpu, rm, NO_SP) & 0x3f)));
- }
- /* 32 bit logical shift right. */
- static void
- lsrv32 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP,
- shifted32 (aarch64_get_reg_u32 (cpu, rn, NO_SP), LSR,
- (aarch64_get_reg_u32 (cpu, rm, NO_SP) & 0x1f)));
- }
- /* 64 bit logical shift right. */
- static void
- lsrv64 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP,
- shifted64 (aarch64_get_reg_u64 (cpu, rn, NO_SP), LSR,
- (aarch64_get_reg_u64 (cpu, rm, NO_SP) & 0x3f)));
- }
- /* 32 bit rotate right. */
- static void
- rorv32 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP,
- shifted32 (aarch64_get_reg_u32 (cpu, rn, NO_SP), ROR,
- (aarch64_get_reg_u32 (cpu, rm, NO_SP) & 0x1f)));
- }
- /* 64 bit rotate right. */
- static void
- rorv64 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP,
- shifted64 (aarch64_get_reg_u64 (cpu, rn, NO_SP), ROR,
- (aarch64_get_reg_u64 (cpu, rm, NO_SP) & 0x3f)));
- }
- /* Divide. */
- /* 32 bit signed divide. */
- static void
- sdiv32 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- /* N.B. the pseudo-code does the divide using 64 bit data. */
- /* TODO : check that this rounds towards zero as required. */
- int64_t dividend = aarch64_get_reg_s32 (cpu, rn, NO_SP);
- int64_t divisor = aarch64_get_reg_s32 (cpu, rm, NO_SP);
- aarch64_set_reg_s64 (cpu, rd, NO_SP,
- divisor ? ((int32_t) (dividend / divisor)) : 0);
- }
- /* 64 bit signed divide. */
- static void
- sdiv64 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- /* TODO : check that this rounds towards zero as required. */
- int64_t divisor = aarch64_get_reg_s64 (cpu, rm, NO_SP);
- aarch64_set_reg_s64
- (cpu, rd, NO_SP,
- divisor ? (aarch64_get_reg_s64 (cpu, rn, NO_SP) / divisor) : 0);
- }
- /* 32 bit unsigned divide. */
- static void
- udiv32 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- /* N.B. the pseudo-code does the divide using 64 bit data. */
- uint64_t dividend = aarch64_get_reg_u32 (cpu, rn, NO_SP);
- uint64_t divisor = aarch64_get_reg_u32 (cpu, rm, NO_SP);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- divisor ? (uint32_t) (dividend / divisor) : 0);
- }
- /* 64 bit unsigned divide. */
- static void
- udiv64 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- /* TODO : check that this rounds towards zero as required. */
- uint64_t divisor = aarch64_get_reg_u64 (cpu, rm, NO_SP);
- aarch64_set_reg_u64
- (cpu, rd, NO_SP,
- divisor ? (aarch64_get_reg_u64 (cpu, rn, NO_SP) / divisor) : 0);
- }
- static void
- dexDataProc2Source (sim_cpu *cpu)
- {
- /* assert instr[30] == 0
- instr[28,21] == 11010110
- instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[29] = S : 0 ==> ok, 1 ==> UNALLOC
- instr[15,10] = opcode : 000010 ==> UDIV, 000011 ==> SDIV,
- 001000 ==> LSLV, 001001 ==> LSRV
- 001010 ==> ASRV, 001011 ==> RORV
- ow ==> UNALLOC. */
- uint32_t dispatch;
- uint32_t S = INSTR (29, 29);
- uint32_t opcode = INSTR (15, 10);
- if (S == 1)
- HALT_UNALLOC;
- if (opcode & 0x34)
- HALT_UNALLOC;
- dispatch = ( (INSTR (31, 31) << 3)
- | (uimm (opcode, 3, 3) << 2)
- | uimm (opcode, 1, 0));
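- /* e.g. a 64 bit UDIV (opcode 000010) gives dispatch
- (1 << 3) | (0 << 2) | 2 == 10, which selects udiv64. */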
- switch (dispatch)
- {
- case 2: udiv32 (cpu); return;
- case 3: sdiv32 (cpu); return;
- case 4: lslv32 (cpu); return;
- case 5: lsrv32 (cpu); return;
- case 6: asrv32 (cpu); return;
- case 7: rorv32 (cpu); return;
- case 10: udiv64 (cpu); return;
- case 11: sdiv64 (cpu); return;
- case 12: lslv64 (cpu); return;
- case 13: lsrv64 (cpu); return;
- case 14: asrv64 (cpu); return;
- case 15: rorv64 (cpu); return;
- default: HALT_UNALLOC;
- }
- }
- /* Multiply. */
- /* 32 bit multiply and add. */
- static void
- madd32 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned ra = INSTR (14, 10);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_reg_u32 (cpu, ra, NO_SP)
- + aarch64_get_reg_u32 (cpu, rn, NO_SP)
- * aarch64_get_reg_u32 (cpu, rm, NO_SP));
- }
- /* 64 bit multiply and add. */
- static void
- madd64 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned ra = INSTR (14, 10);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_reg_u64 (cpu, ra, NO_SP)
- + (aarch64_get_reg_u64 (cpu, rn, NO_SP)
- * aarch64_get_reg_u64 (cpu, rm, NO_SP)));
- }
- /* 32 bit multiply and sub. */
- static void
- msub32 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned ra = INSTR (14, 10);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_reg_u32 (cpu, ra, NO_SP)
- - aarch64_get_reg_u32 (cpu, rn, NO_SP)
- * aarch64_get_reg_u32 (cpu, rm, NO_SP));
- }
- /* 64 bit multiply and sub. */
- static void
- msub64 (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned ra = INSTR (14, 10);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- aarch64_get_reg_u64 (cpu, ra, NO_SP)
- - aarch64_get_reg_u64 (cpu, rn, NO_SP)
- * aarch64_get_reg_u64 (cpu, rm, NO_SP));
- }
- /* Signed multiply add long -- source, source2 : 32 bit, source3 : 64 bit. */
- static void
- smaddl (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned ra = INSTR (14, 10);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- /* N.B. we need to multiply the signed 32 bit values in rn, rm to
- obtain a 64 bit product. */
- aarch64_set_reg_s64
- (cpu, rd, NO_SP,
- aarch64_get_reg_s64 (cpu, ra, NO_SP)
- + ((int64_t) aarch64_get_reg_s32 (cpu, rn, NO_SP))
- * ((int64_t) aarch64_get_reg_s32 (cpu, rm, NO_SP)));
- }
- /* Signed multiply sub long -- source, source2 : 32 bit, source3 : 64 bit. */
- static void
- smsubl (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned ra = INSTR (14, 10);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- /* N.B. we need to multiply the signed 32 bit values in rn, rm to
- obtain a 64 bit product. */
- aarch64_set_reg_s64
- (cpu, rd, NO_SP,
- aarch64_get_reg_s64 (cpu, ra, NO_SP)
- - ((int64_t) aarch64_get_reg_s32 (cpu, rn, NO_SP))
- * ((int64_t) aarch64_get_reg_s32 (cpu, rm, NO_SP)));
- }
- /* Integer Multiply/Divide. */
- /* First some macros and a helper function. */
- /* Macros to test or access elements of 64 bit words. */
- /* Mask used to access lo 32 bits of 64 bit unsigned int. */
- #define LOW_WORD_MASK ((1ULL << 32) - 1)
- /* Return the lo 32 bit word of a 64 bit unsigned int as a 64 bit unsigned int. */
- #define lowWordToU64(_value_u64) ((_value_u64) & LOW_WORD_MASK)
- /* Return the hi 32 bit word of a 64 bit unsigned int as a 64 bit unsigned int. */
- #define highWordToU64(_value_u64) ((_value_u64) >> 32)
- /* Offset of sign bit in 64 bit signed integer. */
- #define SIGN_SHIFT_U64 63
- /* The sign bit itself -- also identifies the minimum negative int value. */
- #define SIGN_BIT_U64 (1ULL << SIGN_SHIFT_U64)
- /* Return true if a 64 bit signed int presented as an unsigned int is the
- most negative value. */
- #define isMinimumU64(_value_u64) ((_value_u64) == SIGN_BIT_U64)
- /* Return true (non-zero) if a 64 bit signed int presented as an unsigned
- int has its sign bit set. */
- #define isSignSetU64(_value_u64) ((_value_u64) & SIGN_BIT_U64)
- /* Return 1L or -1L according to whether a 64 bit signed int presented as
- an unsigned int has its sign bit set or not. */
- #define signOfU64(_value_u64) (1L + (((_value_u64) >> SIGN_SHIFT_U64) * -2L))
- /* Clear the sign bit of a 64 bit signed int presented as an unsigned int. */
- #define clearSignU64(_value_u64) ((_value_u64) &= ~SIGN_BIT_U64)
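- /* mul64hi below uses schoolbook decomposition: writing
- a = a_hi * 2^32 + a_lo and b = b_hi * 2^32 + b_lo,
- a * b = a_hi * b_hi * 2^64
- + (a_hi * b_lo + a_lo * b_hi) * 2^32
- + a_lo * b_lo,
- so the high 64 bits are a_hi * b_hi plus the high halves of the
- two middle terms plus any carries out of the low half. */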
- /* Multiply two 64 bit ints and return
- the hi 64 bits of the 128 bit product. */
- static uint64_t
- mul64hi (uint64_t value1, uint64_t value2)
- {
- uint64_t resultmid1;
- uint64_t result;
- uint64_t value1_lo = lowWordToU64 (value1);
- uint64_t value1_hi = highWordToU64 (value1);
- uint64_t value2_lo = lowWordToU64 (value2);
- uint64_t value2_hi = highWordToU64 (value2);
- /* Cross-multiply and collect results. */
- uint64_t xproductlo = value1_lo * value2_lo;
- uint64_t xproductmid1 = value1_lo * value2_hi;
- uint64_t xproductmid2 = value1_hi * value2_lo;
- uint64_t xproducthi = value1_hi * value2_hi;
- uint64_t carry = 0;
- /* Start accumulating 64 bit results. */
- /* Drop bottom half of lowest cross-product. */
- uint64_t resultmid = xproductlo >> 32;
- /* Add in middle products. */
- resultmid = resultmid + xproductmid1;
- /* Check for overflow. */
- if (resultmid < xproductmid1)
- /* Carry over 1 into top cross-product. */
- carry++;
- resultmid1 = resultmid + xproductmid2;
- /* Check for overflow. */
- if (resultmid1 < xproductmid2)
- /* Carry over 1 into top cross-product. */
- carry++;
- /* Drop lowest 32 bits of middle cross-product. */
- result = resultmid1 >> 32;
- /* Move carry bit to just above middle cross-product highest bit. */
- carry = carry << 32;
- /* Add the top cross-product and any carry. */
- result += xproducthi + carry;
- return result;
- }
- /* Signed multiply high, source, source2 :
- 64 bit, dest <-- high 64-bit of result. */
- static void
- smulh (sim_cpu *cpu)
- {
- uint64_t uresult;
- int64_t result;
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- GReg ra = INSTR (14, 10);
- int64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- int64_t value2 = aarch64_get_reg_u64 (cpu, rm, NO_SP);
- uint64_t uvalue1;
- uint64_t uvalue2;
- int negate = 0;
- if (ra != R31)
- HALT_UNALLOC;
- /* Convert to unsigned and use the unsigned mul64hi routine,
- then fix the sign up afterwards. */
- if (value1 < 0)
- {
- negate = !negate;
- uvalue1 = -value1;
- }
- else
- {
- uvalue1 = value1;
- }
- if (value2 < 0)
- {
- negate = !negate;
- uvalue2 = -value2;
- }
- else
- {
- uvalue2 = value2;
- }
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- uresult = mul64hi (uvalue1, uvalue2);
- result = uresult;
- if (negate)
- {
- /* Multiply 128-bit result by -1, which means highpart gets inverted,
- and has carry in added only if low part is 0. */
- result = ~result;
- if ((uvalue1 * uvalue2) == 0)
- result += 1;
- }
- aarch64_set_reg_s64 (cpu, rd, NO_SP, result);
- }
- /* Unsigned multiply add long -- source, source2 :
- 32 bit, source3 : 64 bit. */
- static void
- umaddl (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned ra = INSTR (14, 10);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* N.B. we need to multiply the unsigned 32 bit values in rn, rm to
- obtain a 64 bit product. */
- aarch64_set_reg_u64
- (cpu, rd, NO_SP,
- aarch64_get_reg_u64 (cpu, ra, NO_SP)
- + ((uint64_t) aarch64_get_reg_u32 (cpu, rn, NO_SP))
- * ((uint64_t) aarch64_get_reg_u32 (cpu, rm, NO_SP)));
- }
- /* Unsigned multiply sub long -- source, source2 : 32 bit, source3 : 64 bit. */
- static void
- umsubl (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned ra = INSTR (14, 10);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- /* N.B. we need to multiply the unsigned 32 bit values in rn, rm to
- obtain a 64 bit product. */
- aarch64_set_reg_u64
- (cpu, rd, NO_SP,
- aarch64_get_reg_u64 (cpu, ra, NO_SP)
- - ((uint64_t) aarch64_get_reg_u32 (cpu, rn, NO_SP))
- * ((uint64_t) aarch64_get_reg_u32 (cpu, rm, NO_SP)));
- }
- /* Unsigned multiply high, source, source2 :
- 64 bit, dest <-- high 64-bit of result. */
- static void
- umulh (sim_cpu *cpu)
- {
- unsigned rm = INSTR (20, 16);
- unsigned rn = INSTR (9, 5);
- unsigned rd = INSTR (4, 0);
- GReg ra = INSTR (14, 10);
- if (ra != R31)
- HALT_UNALLOC;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rd, NO_SP,
- mul64hi (aarch64_get_reg_u64 (cpu, rn, NO_SP),
- aarch64_get_reg_u64 (cpu, rm, NO_SP)));
- }
- static void
- dexDataProc3Source (sim_cpu *cpu)
- {
- /* assert instr[28,24] == 11011. */
- /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit (for rd at least)
- instr[30,29] = op54 : 00 ==> ok, ow ==> UNALLOC
- instr[23,21] = op31 : 111 ==> UNALLOC, o2 ==> ok
- instr[15] = o0 : 0/1 ==> ok
- instr[23,21:15] ==> op : 0000 ==> MADD, 0001 ==> MSUB, (32/64 bit)
- 0010 ==> SMADDL, 0011 ==> SMSUBL, (64 bit only)
- 0100 ==> SMULH, (64 bit only)
- 1010 ==> UMADDL, 1011 ==> UMSUBL, (64 bit only)
- 1100 ==> UMULH (64 bit only)
- ow ==> UNALLOC. */
- uint32_t dispatch;
- uint32_t size = INSTR (31, 31);
- uint32_t op54 = INSTR (30, 29);
- uint32_t op31 = INSTR (23, 21);
- uint32_t o0 = INSTR (15, 15);
- if (op54 != 0)
- HALT_UNALLOC;
- if (size == 0)
- {
- if (op31 != 0)
- HALT_UNALLOC;
- if (o0 == 0)
- madd32 (cpu);
- else
- msub32 (cpu);
- return;
- }
- dispatch = (op31 << 1) | o0;
- switch (dispatch)
- {
- case 0: madd64 (cpu); return;
- case 1: msub64 (cpu); return;
- case 2: smaddl (cpu); return;
- case 3: smsubl (cpu); return;
- case 4: smulh (cpu); return;
- case 10: umaddl (cpu); return;
- case 11: umsubl (cpu); return;
- case 12: umulh (cpu); return;
- default: HALT_UNALLOC;
- }
- }
- static void
- dexDPReg (sim_cpu *cpu)
- {
- /* uint32_t group = dispatchGroup (aarch64_get_instr (cpu));
- assert group == GROUP_DPREG_0101 || group == GROUP_DPREG_1101
- bits [28:24:21] of a DPReg are the secondary dispatch vector. */
- uint32_t group2 = dispatchDPReg (aarch64_get_instr (cpu));
- switch (group2)
- {
- case DPREG_LOG_000:
- case DPREG_LOG_001:
- dexLogicalShiftedRegister (cpu); return;
- case DPREG_ADDSHF_010:
- dexAddSubtractShiftedRegister (cpu); return;
- case DPREG_ADDEXT_011:
- dexAddSubtractExtendedRegister (cpu); return;
- case DPREG_ADDCOND_100:
- {
- /* This set bundles a variety of different operations. */
- /* Check for. */
- /* 1) add/sub w carry. */
- uint32_t mask1 = 0x1FE00000U;
- uint32_t val1 = 0x1A000000U;
- /* 2) cond compare register/immediate. */
- uint32_t mask2 = 0x1FE00000U;
- uint32_t val2 = 0x1A400000U;
- /* 3) cond select. */
- uint32_t mask3 = 0x1FE00000U;
- uint32_t val3 = 0x1A800000U;
- /* 4) data proc 1/2 source. */
- uint32_t mask4 = 0x1FE00000U;
- uint32_t val4 = 0x1AC00000U;
- if ((aarch64_get_instr (cpu) & mask1) == val1)
- dexAddSubtractWithCarry (cpu);
- else if ((aarch64_get_instr (cpu) & mask2) == val2)
- CondCompare (cpu);
- else if ((aarch64_get_instr (cpu) & mask3) == val3)
- dexCondSelect (cpu);
- else if ((aarch64_get_instr (cpu) & mask4) == val4)
- {
- /* Bit 30 is clear for data proc 2 source
- and set for data proc 1 source. */
- if (aarch64_get_instr (cpu) & (1U << 30))
- dexDataProc1Source (cpu);
- else
- dexDataProc2Source (cpu);
- }
- else
- /* Should not reach here. */
- HALT_NYI;
- return;
- }
- case DPREG_3SRC_110:
- dexDataProc3Source (cpu); return;
- case DPREG_UNALLOC_101:
- HALT_UNALLOC;
- case DPREG_3SRC_111:
- dexDataProc3Source (cpu); return;
- default:
- /* Should never reach here. */
- HALT_NYI;
- }
- }
- /* Unconditional Branch immediate.
- Offset is a PC-relative byte offset in the range +/- 128MiB.
- The offset is assumed to be raw from the decode, i.e. the
- simulator is expected to scale it from a word offset to a byte offset. */
- /* Unconditional branch. */
- static void
- buc (sim_cpu *cpu, int32_t offset)
- {
- aarch64_set_next_PC_by_offset (cpu, offset);
- }
- static unsigned stack_depth = 0;
- /* Unconditional branch and link -- writes return PC to LR. */
- static void
- bl (sim_cpu *cpu, int32_t offset)
- {
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_save_LR (cpu);
- aarch64_set_next_PC_by_offset (cpu, offset);
- if (TRACE_BRANCH_P (cpu))
- {
- ++ stack_depth;
- TRACE_BRANCH (cpu,
- " %*scall %" PRIx64 " [%s]"
- " [args: %" PRIx64 " %" PRIx64 " %" PRIx64 "]",
- stack_depth, " ", aarch64_get_next_PC (cpu),
- aarch64_get_func (CPU_STATE (cpu),
- aarch64_get_next_PC (cpu)),
- aarch64_get_reg_u64 (cpu, 0, NO_SP),
- aarch64_get_reg_u64 (cpu, 1, NO_SP),
- aarch64_get_reg_u64 (cpu, 2, NO_SP)
- );
- }
- }
- /* Unconditional Branch register.
- Branch/return address is in source register. */
- /* Unconditional branch. */
- static void
- br (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_next_PC (cpu, aarch64_get_reg_u64 (cpu, rn, NO_SP));
- }
- /* Unconditional branch and link -- writes return PC to LR. */
- static void
- blr (sim_cpu *cpu)
- {
- /* Ensure we read the destination before we write LR. */
- uint64_t target = aarch64_get_reg_u64 (cpu, INSTR (9, 5), NO_SP);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_save_LR (cpu);
- aarch64_set_next_PC (cpu, target);
- if (TRACE_BRANCH_P (cpu))
- {
- ++ stack_depth;
- TRACE_BRANCH (cpu,
- " %*scall %" PRIx64 " [%s]"
- " [args: %" PRIx64 " %" PRIx64 " %" PRIx64 "]",
- stack_depth, " ", aarch64_get_next_PC (cpu),
- aarch64_get_func (CPU_STATE (cpu),
- aarch64_get_next_PC (cpu)),
- aarch64_get_reg_u64 (cpu, 0, NO_SP),
- aarch64_get_reg_u64 (cpu, 1, NO_SP),
- aarch64_get_reg_u64 (cpu, 2, NO_SP)
- );
- }
- }
- /* Return -- the assembler will default the source register to LR.
- This is functionally equivalent to br but, presumably, unlike br
- it side-effects the branch predictor. */
- static void
- ret (sim_cpu *cpu)
- {
- unsigned rn = INSTR (9, 5);
- aarch64_set_next_PC (cpu, aarch64_get_reg_u64 (cpu, rn, NO_SP));
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (TRACE_BRANCH_P (cpu))
- {
- TRACE_BRANCH (cpu,
- " %*sreturn [result: %" PRIx64 "]",
- stack_depth, " ", aarch64_get_reg_u64 (cpu, 0, NO_SP));
- -- stack_depth;
- }
- }
- /* NOP -- we implement this and call it from the decode in case we
- want to intercept it later. */
- static void
- nop (sim_cpu *cpu)
- {
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- }
- /* Data synchronization barrier. */
- static void
- dsb (sim_cpu *cpu)
- {
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- }
- /* Data memory barrier. */
- static void
- dmb (sim_cpu *cpu)
- {
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- }
- /* Instruction synchronization barrier. */
- static void
- isb (sim_cpu *cpu)
- {
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- }
- static void
- dexBranchImmediate (sim_cpu *cpu)
- {
- /* assert instr[30,26] == 00101
- instr[31] ==> 0 == B, 1 == BL
- instr[25,0] == imm26 branch offset counted in words. */
- uint32_t top = INSTR (31, 31);
- /* We have a 26 bit signed word offset which we need to pass to the
- execute routine as a signed byte offset. */
- int32_t offset = simm32 (aarch64_get_instr (cpu), 25, 0) << 2;
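- /* e.g. imm26 == 1 gives a byte offset of 4, i.e. the following
- instruction. */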
- if (top)
- bl (cpu, offset);
- else
- buc (cpu, offset);
- }
- /* Control Flow. */
- /* Conditional branch.
- Offset is a PC-relative byte offset in the range +/- 1MiB.
- pos is a bit position in the range 0 .. 63.
- cc is a CondCode enum value as pulled out of the decode.
- N.B. any offset register (source) can only be Xn or Wn. */
- static void
- bcc (sim_cpu *cpu, int32_t offset, CondCode cc)
- {
- /* The test returns TRUE if CC is met. */
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (testConditionCode (cpu, cc))
- aarch64_set_next_PC_by_offset (cpu, offset);
- }
- /* 32 bit branch on register non-zero. */
- static void
- cbnz32 (sim_cpu *cpu, int32_t offset)
- {
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (aarch64_get_reg_u32 (cpu, rt, NO_SP) != 0)
- aarch64_set_next_PC_by_offset (cpu, offset);
- }
- /* 64 bit branch on register non-zero. */
- static void
- cbnz (sim_cpu *cpu, int32_t offset)
- {
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (aarch64_get_reg_u64 (cpu, rt, NO_SP) != 0)
- aarch64_set_next_PC_by_offset (cpu, offset);
- }
- /* 32 bit branch on register zero. */
- static void
- cbz32 (sim_cpu *cpu, int32_t offset)
- {
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (aarch64_get_reg_u32 (cpu, rt, NO_SP) == 0)
- aarch64_set_next_PC_by_offset (cpu, offset);
- }
- /* 64 bit branch on register zero. */
- static void
- cbz (sim_cpu *cpu, int32_t offset)
- {
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (aarch64_get_reg_u64 (cpu, rt, NO_SP) == 0)
- aarch64_set_next_PC_by_offset (cpu, offset);
- }
- /* Branch on register bit test non-zero -- one size fits all. */
- static void
- tbnz (sim_cpu *cpu, uint32_t pos, int32_t offset)
- {
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (aarch64_get_reg_u64 (cpu, rt, NO_SP) & (((uint64_t) 1) << pos))
- aarch64_set_next_PC_by_offset (cpu, offset);
- }
- /* Branch on register bit test zero -- one size fits all. */
- static void
- tbz (sim_cpu *cpu, uint32_t pos, int32_t offset)
- {
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (!(aarch64_get_reg_u64 (cpu, rt, NO_SP) & (((uint64_t) 1) << pos)))
- aarch64_set_next_PC_by_offset (cpu, offset);
- }
- static void
- dexCompareBranchImmediate (sim_cpu *cpu)
- {
- /* instr[30,25] = 01 1010
- instr[31] = size : 0 ==> 32, 1 ==> 64
- instr[24] = op : 0 ==> CBZ, 1 ==> CBNZ
- instr[23,5] = simm19 branch offset counted in words
- instr[4,0] = rt */
- uint32_t size = INSTR (31, 31);
- uint32_t op = INSTR (24, 24);
- int32_t offset = simm32 (aarch64_get_instr (cpu), 23, 5) << 2;
- if (size == 0)
- {
- if (op == 0)
- cbz32 (cpu, offset);
- else
- cbnz32 (cpu, offset);
- }
- else
- {
- if (op == 0)
- cbz (cpu, offset);
- else
- cbnz (cpu, offset);
- }
- }
- static void
- dexTestBranchImmediate (sim_cpu *cpu)
- {
- /* instr[31] = b5 : bit 5 of test bit idx
- instr[30,25] = 01 1011
- instr[24] = op : 0 ==> TBZ, 1 ==> TBNZ
- instr[23,19] = b40 : bits 4 to 0 of test bit idx
- instr[18,5] = simm14 : signed offset counted in words
- instr[4,0] = uimm5 */
- uint32_t pos = ((INSTR (31, 31) << 5) | INSTR (23, 19));
- int32_t offset = simm32 (aarch64_get_instr (cpu), 18, 5) << 2;
- NYI_assert (30, 25, 0x1b);
- if (INSTR (24, 24) == 0)
- tbz (cpu, pos, offset);
- else
- tbnz (cpu, pos, offset);
- }
- static void
- dexCondBranchImmediate (sim_cpu *cpu)
- {
- /* instr[31,25] = 010 1010
- instr[24] = op1 : op1:op0 must be 00 ==> B.cond
- instr[23,5] = simm19 : signed offset counted in words
- instr[4] = op0
- instr[3,0] = cond */
- int32_t offset;
- uint32_t op = ((INSTR (24, 24) << 1) | INSTR (4, 4));
- NYI_assert (31, 25, 0x2a);
- if (op != 0)
- HALT_UNALLOC;
- offset = simm32 (aarch64_get_instr (cpu), 23, 5) << 2;
- bcc (cpu, offset, INSTR (3, 0));
- }
- static void
- dexBranchRegister (sim_cpu *cpu)
- {
- /* instr[31,25] = 110 1011
- instr[24,21] = op : 0 ==> BR, 1 ==> BLR, 2 ==> RET, 4 ==> ERET, 5 ==> DRPS
- instr[20,16] = op2 : must be 11111
- instr[15,10] = op3 : must be 000000
- instr[4,0] = op4 : must be 00000. */
- uint32_t op = INSTR (24, 21);
- uint32_t op2 = INSTR (20, 16);
- uint32_t op3 = INSTR (15, 10);
- uint32_t op4 = INSTR (4, 0);
- NYI_assert (31, 25, 0x6b);
- if (op2 != 0x1F || op3 != 0 || op4 != 0)
- HALT_UNALLOC;
- if (op == 0)
- br (cpu);
- else if (op == 1)
- blr (cpu);
- else if (op == 2)
- ret (cpu);
- else
- {
- /* ERET and DRPS require 0b11111 for Rn = instr[9,5];
- anything else is unallocated. */
- uint32_t rn = INSTR (9, 5);
- if (rn != 0x1f)
- HALT_UNALLOC;
- if (op == 4 || op == 5)
- HALT_NYI;
- HALT_UNALLOC;
- }
- }
- /* FIXME: We should get the Angel SWI values from ../../libgloss/aarch64/svc.h
- but this may not be available. So instead we define the values we need
- here. */
- #define AngelSVC_Reason_Open 0x01
- #define AngelSVC_Reason_Close 0x02
- #define AngelSVC_Reason_Write 0x05
- #define AngelSVC_Reason_Read 0x06
- #define AngelSVC_Reason_IsTTY 0x09
- #define AngelSVC_Reason_Seek 0x0A
- #define AngelSVC_Reason_FLen 0x0C
- #define AngelSVC_Reason_Remove 0x0E
- #define AngelSVC_Reason_Rename 0x0F
- #define AngelSVC_Reason_Clock 0x10
- #define AngelSVC_Reason_Time 0x11
- #define AngelSVC_Reason_System 0x12
- #define AngelSVC_Reason_Errno 0x13
- #define AngelSVC_Reason_GetCmdLine 0x15
- #define AngelSVC_Reason_HeapInfo 0x16
- #define AngelSVC_Reason_ReportException 0x18
- #define AngelSVC_Reason_Elapsed 0x30
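- /* An Angel SVC call supplies the reason code in w0 and, for most
- reasons, a pointer to a parameter block in x1; the result is
- passed back in x0. handle_halt below implements this protocol. */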
- static void
- handle_halt (sim_cpu *cpu, uint32_t val)
- {
- uint64_t result = 0;
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- if (val != 0xf000)
- {
- TRACE_SYSCALL (cpu, " HLT [0x%x]", val);
- sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
- sim_stopped, SIM_SIGTRAP);
- }
- /* We have encountered an Angel SVC call. See if we can process it. */
- switch (aarch64_get_reg_u32 (cpu, 0, NO_SP))
- {
- case AngelSVC_Reason_HeapInfo:
- {
- /* Get the values. */
- uint64_t stack_top = aarch64_get_stack_start (cpu);
- uint64_t heap_base = aarch64_get_heap_start (cpu);
- /* Get the pointer */
- uint64_t ptr = aarch64_get_reg_u64 (cpu, 1, SP_OK);
- ptr = aarch64_get_mem_u64 (cpu, ptr);
- /* Fill in the memory block. */
- /* Start addr of heap. */
- aarch64_set_mem_u64 (cpu, ptr + 0, heap_base);
- /* End addr of heap. */
- aarch64_set_mem_u64 (cpu, ptr + 8, stack_top);
- /* Lowest stack addr. */
- aarch64_set_mem_u64 (cpu, ptr + 16, heap_base);
- /* Initial stack addr. */
- aarch64_set_mem_u64 (cpu, ptr + 24, stack_top);
- TRACE_SYSCALL (cpu, " AngelSVC: Get Heap Info");
- }
- break;
- case AngelSVC_Reason_Open:
- {
- /* Get the pointer */
- /* uint64_t ptr = aarch64_get_reg_u64 (cpu, 1, SP_OK);. */
- /* FIXME: For now we just assume that we will only be asked
- to open the standard file descriptors. */
- static int fd = 0;
- result = fd ++;
- TRACE_SYSCALL (cpu, " AngelSVC: Open file %d", fd - 1);
- }
- break;
- case AngelSVC_Reason_Close:
- {
- uint64_t fh = aarch64_get_reg_u64 (cpu, 1, SP_OK);
- TRACE_SYSCALL (cpu, " AngelSVC: Close file %d", (int) fh);
- result = 0;
- }
- break;
- case AngelSVC_Reason_Errno:
- result = 0;
- TRACE_SYSCALL (cpu, " AngelSVC: Get Errno");
- break;
- case AngelSVC_Reason_Clock:
- result =
- #ifdef CLOCKS_PER_SEC
- (CLOCKS_PER_SEC >= 100)
- ? (clock () / (CLOCKS_PER_SEC / 100))
- : ((clock () * 100) / CLOCKS_PER_SEC)
- #else
- /* Presume unix... clock() returns microseconds. */
- (clock () / 10000)
- #endif
- ;
- TRACE_SYSCALL (cpu, " AngelSVC: Get Clock");
- break;
- case AngelSVC_Reason_GetCmdLine:
- {
- /* Get the pointer */
- uint64_t ptr = aarch64_get_reg_u64 (cpu, 1, SP_OK);
- ptr = aarch64_get_mem_u64 (cpu, ptr);
- /* FIXME: No command line for now. */
- aarch64_set_mem_u64 (cpu, ptr, 0);
- TRACE_SYSCALL (cpu, " AngelSVC: Get Command Line");
- }
- break;
- case AngelSVC_Reason_IsTTY:
- result = 1;
- TRACE_SYSCALL (cpu, " AngelSVC: IsTTY ?");
- break;
- case AngelSVC_Reason_Write:
- {
- /* Get the pointer */
- uint64_t ptr = aarch64_get_reg_u64 (cpu, 1, SP_OK);
- /* Get the write control block. */
- uint64_t fd = aarch64_get_mem_u64 (cpu, ptr);
- uint64_t buf = aarch64_get_mem_u64 (cpu, ptr + 8);
- uint64_t len = aarch64_get_mem_u64 (cpu, ptr + 16);
- TRACE_SYSCALL (cpu, "write of %" PRIx64 " bytes from %"
- PRIx64 " on descriptor %" PRIx64,
- len, buf, fd);
- if (len > 1280)
- {
- TRACE_SYSCALL (cpu,
- " AngelSVC: Write: Suspiciously long write: %ld",
- (long) len);
- sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
- sim_stopped, SIM_SIGBUS);
- }
- else if (fd == 1)
- {
- printf ("%.*s", (int) len, aarch64_get_mem_ptr (cpu, buf));
- }
- else if (fd == 2)
- {
- TRACE (cpu, 0, "\n");
- sim_io_eprintf (CPU_STATE (cpu), "%.*s",
- (int) len, aarch64_get_mem_ptr (cpu, buf));
- TRACE (cpu, 0, "\n");
- }
- else
- {
- TRACE_SYSCALL (cpu,
- " AngelSVC: Write: Unexpected file handle: %d",
- (int) fd);
- sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
- sim_stopped, SIM_SIGABRT);
- }
- }
- break;
- case AngelSVC_Reason_ReportException:
- {
- /* Get the pointer */
- uint64_t ptr = aarch64_get_reg_u64 (cpu, 1, SP_OK);
- /*ptr = aarch64_get_mem_u64 (cpu, ptr);. */
- uint64_t type = aarch64_get_mem_u64 (cpu, ptr);
- uint64_t state = aarch64_get_mem_u64 (cpu, ptr + 8);
- TRACE_SYSCALL (cpu,
- "Angel Exception: type 0x%" PRIx64 " state %" PRIx64,
- type, state);
- if (type == 0x20026)
- sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
- sim_exited, state);
- else
- sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
- sim_stopped, SIM_SIGINT);
- }
- break;
- case AngelSVC_Reason_Read:
- case AngelSVC_Reason_FLen:
- case AngelSVC_Reason_Seek:
- case AngelSVC_Reason_Remove:
- case AngelSVC_Reason_Time:
- case AngelSVC_Reason_System:
- case AngelSVC_Reason_Rename:
- case AngelSVC_Reason_Elapsed:
- default:
- TRACE_SYSCALL (cpu, " HLT [Unknown angel %x]",
- aarch64_get_reg_u32 (cpu, 0, NO_SP));
- sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
- sim_stopped, SIM_SIGTRAP);
- }
- aarch64_set_reg_u64 (cpu, 0, NO_SP, result);
- }
- static void
- dexExcpnGen (sim_cpu *cpu)
- {
- /* instr[31:24] = 11010100
- instr[23,21] = opc : 000 ==> GEN EXCPN, 001 ==> BRK
- 010 ==> HLT, 101 ==> DBG GEN EXCPN
- instr[20,5] = imm16
- instr[4,2] = opc2 : 000 ==> OK, otherwise ==> UNALLOC
- instr[1,0] = LL : discriminates opc */
- uint32_t opc = INSTR (23, 21);
- uint32_t imm16 = INSTR (20, 5);
- uint32_t opc2 = INSTR (4, 2);
- uint32_t LL;
- NYI_assert (31, 24, 0xd4);
- if (opc2 != 0)
- HALT_UNALLOC;
- LL = INSTR (1, 0);
- /* We only implement HLT and BRK for now. */
- if (opc == 1 && LL == 0)
- {
- TRACE_EVENTS (cpu, " BRK [0x%x]", imm16);
- sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),
- sim_exited, aarch64_get_reg_s32 (cpu, R0, SP_OK));
- }
- if (opc == 2 && LL == 0)
- handle_halt (cpu, imm16);
- else if (opc == 0 || opc == 5)
- HALT_NYI;
- else
- HALT_UNALLOC;
- }
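- /* Illustration, not simulator code: how the INSTR (hi, lo) accesses
- above carve up an exception-generation instruction. Hand-worked
- example (a sketch): BRK #1 assembles to 0xd4200020, giving
- opc = 001, imm16 = 0x0001, opc2 = 000 and LL = 00. */
- static inline uint32_t
- instr_bits_sketch (uint32_t insn, int hi, int lo)
- {
- /* Valid for fields narrower than 32 bits. */
- return (insn >> lo) & ((1u << (hi - lo + 1)) - 1);
- }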
- /* Stub for accessing system registers. */
- static uint64_t
- system_get (sim_cpu *cpu, unsigned op0, unsigned op1, unsigned crn,
- unsigned crm, unsigned op2)
- {
- if (crn == 0 && op1 == 3 && crm == 0 && op2 == 7)
- /* DCZID_EL0 - the Data Cache Zero ID register.
- We do not support DC ZVA at the moment, so
- we return a value with the disable bit set.
- We implement support for the DCZID register since
- it is used by the C library's memset function. */
- return ((uint64_t) 1) << 4;
- if (crn == 0 && op1 == 3 && crm == 0 && op2 == 1)
- /* Cache Type Register. */
- return 0x80008000UL;
- if (crn == 13 && op1 == 3 && crm == 0 && op2 == 2)
- /* TPIDR_EL0 - thread pointer id. */
- return aarch64_get_thread_id (cpu);
- if (op1 == 3 && crm == 4 && op2 == 0)
- return aarch64_get_FPCR (cpu);
- if (op1 == 3 && crm == 4 && op2 == 1)
- return aarch64_get_FPSR (cpu);
- else if (op1 == 3 && crm == 2 && op2 == 0)
- return aarch64_get_CPSR (cpu);
- HALT_NYI;
- }
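- /* Illustration (guest-side sketch, not simulator code): an MRS that
- lands in the TPIDR_EL0 case of system_get above. TPIDR_EL0 encodes
- as op0 = 3, op1 = 3, CRn = 13, CRm = 0, op2 = 2. */
- static unsigned long
- read_tpidr_el0_sketch (void)
- {
- unsigned long v;
- asm volatile ("mrs %0, tpidr_el0" : "=r" (v));
- return v;
- }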
- static void
- system_set (sim_cpu *cpu, unsigned op0, unsigned op1, unsigned crn,
- unsigned crm, unsigned op2, uint64_t val)
- {
- if (op1 == 3 && crm == 4 && op2 == 0)
- aarch64_set_FPCR (cpu, val);
- else if (op1 == 3 && crm == 4 && op2 == 1)
- aarch64_set_FPSR (cpu, val);
- else if (op1 == 3 && crm == 2 && op2 == 0)
- aarch64_set_CPSR (cpu, val);
- else
- HALT_NYI;
- }
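- /* Illustration (guest-side sketch, not simulator code): writing FPCR
- takes the op1 == 3, crm == 4, op2 == 0 arm of system_set above. */
- static void
- write_fpcr_sketch (unsigned long val)
- {
- asm volatile ("msr fpcr, %0" : : "r" (val));
- }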
- static void
- do_mrs (sim_cpu *cpu)
- {
- /* instr[31:20] = 1101 0101 0001 1
- instr[19] = op0
- instr[18,16] = op1
- instr[15,12] = CRn
- instr[11,8] = CRm
- instr[7,5] = op2
- instr[4,0] = Rt */
- unsigned sys_op0 = INSTR (19, 19) + 2;
- unsigned sys_op1 = INSTR (18, 16);
- unsigned sys_crn = INSTR (15, 12);
- unsigned sys_crm = INSTR (11, 8);
- unsigned sys_op2 = INSTR (7, 5);
- unsigned rt = INSTR (4, 0);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- aarch64_set_reg_u64 (cpu, rt, NO_SP,
- system_get (cpu, sys_op0, sys_op1, sys_crn, sys_crm, sys_op2));
- }
- static void
- do_MSR_immediate (sim_cpu *cpu)
- {
- /* instr[31:19] = 1101 0101 0000 0
- instr[18,16] = op1
- instr[15,12] = 0100
- instr[11,8] = CRm
- instr[7,5] = op2
- instr[4,0] = 1 1111 */
- unsigned op1 = INSTR (18, 16);
- /* unsigned crm = INSTR (11, 8); */
- unsigned op2 = INSTR (7, 5);
- NYI_assert (31, 19, 0x1AA0);
- NYI_assert (15, 12, 0x4);
- NYI_assert (4, 0, 0x1F);
- if (op1 == 0)
- {
- if (op2 == 5)
- HALT_NYI; /* set SPSel. */
- else
- HALT_UNALLOC;
- }
- else if (op1 == 3)
- {
- if (op2 == 6)
- HALT_NYI; /* set DAIFset. */
- else if (op2 == 7)
- HALT_NYI; /* set DAIFclr. */
- else
- HALT_UNALLOC;
- }
- else
- HALT_UNALLOC;
- }
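- /* Illustration: an interrupt-mask update such as
- msr daifset, #2
- encodes op1 = 011, op2 = 110 and would reach the DAIFSet arm above,
- which currently halts with HALT_NYI. */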
- static void
- do_MSR_reg (sim_cpu *cpu)
- {
- /* instr[31:20] = 1101 0101 0001
- instr[19] = op0
- instr[18,16] = op1
- instr[15,12] = CRn
- instr[11,8] = CRm
- instr[7,5] = op2
- instr[4,0] = Rt */
- unsigned sys_op0 = INSTR (19, 19) + 2;
- unsigned sys_op1 = INSTR (18, 16);
- unsigned sys_crn = INSTR (15, 12);
- unsigned sys_crm = INSTR (11, 8);
- unsigned sys_op2 = INSTR (7, 5);
- unsigned rt = INSTR (4, 0);
- NYI_assert (31, 20, 0xD51);
- TRACE_DECODE (cpu, "emulated at line %d", __LINE__);
- system_set (cpu, sys_op0, sys_op1, sys_crn, sys_crm, sys_op2,
- aarch64_get_reg_u64 (cpu, rt, NO_SP));
- }
- static void
- do_SYS (sim_cpu *cpu)
- {
- /* instr[31,19] = 1101 0101 0000 1
- instr[18,16] = op1
- instr[15,12] = CRn
- instr[11,8] = CRm
- instr[7,5] = op2
- instr[4,0] = Rt */
- NYI_assert (31, 19, 0x1AA1);
- /* FIXME: For now we just silently accept system ops. */
- }
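- /* Illustration: cache-maintenance ops assemble to the SYS form that
- do_SYS accepts. A guest-side sketch (not simulator code):
- asm volatile ("dc zva, %0" : : "r" (ptr) : "memory");
- DC ZVA has op1 = 011 and CRn = 0111, so INSTR (21, 12) == 0x0b7 and
- dexSystem below routes such instructions to do_SYS above. */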
- static void
- dexSystem (sim_cpu *cpu)
- {
- /* instr[31:22] = 1101 01010 0
- instr[21] = L
- instr[20,19] = op0
- instr[18,16] = op1
- instr[15,12] = CRn
- instr[11,8] = CRm
- instr[7,5] = op2
- instr[4,0] = uimm5 */
- /* We are interested in HINT, DSB, DMB and ISB.
- Hint #0 (CRm == 0000, op2 == 000) encodes NOP, the only hint we
- care about, with
- L == 0, op0 == 0, op1 = 011, CRn = 0010, Rt = 11111;
- the decode below also treats CRm != 0000 or op2 > 101 as a NOP.
- DSB, DMB and ISB are the data synchronization, data memory and
- instruction synchronization barriers, respectively, where
- L == 0, op0 == 0, op1 = 011, CRn = 0011, Rt = 11111,
- op2 : DSB ==> 100, DMB ==> 101, ISB ==> 110
- CRm<3:2> ==> domain, CRm<1:0> ==> types,
- domain : 00 ==> OuterShareable, 01 ==> Non-shareable,
- 10 ==> InnerShareable, 11 ==> FullSystem
- types : 01 ==> Reads, 10 ==> Writes,
- 11 ==> All, 00 ==> All (domain == FullSystem). */
- unsigned rt = INSTR (4, 0);
- NYI_assert (31, 22, 0x354);
- switch (INSTR (21, 12))
- {
- case 0x032:
- if (rt == 0x1F)
- {
- /* Treat as NOP: CRm != 0000, or
- (CRm == 0000 and (op2 == 000 or op2 > 101)). */
- uint32_t crm = INSTR (11, 8);
- uint32_t op2 = INSTR (7, 5);
- if (crm != 0 || (op2 == 0 || op2 > 5))
- {
- /* Actually call nop method so we can reimplement it later. */
- nop (cpu);
- return;
- }
- }
- HALT_NYI;
- case 0x033:
- {
- uint32_t op2 = INSTR (7, 5);
- switch (op2)
- {
- case 2: HALT_NYI;
- case 4: dsb (cpu); return;
- case 5: dmb (cpu); return;
- case 6: isb (cpu); return;
- default: HALT_UNALLOC;
- }
- }
- case 0x3B0:
- case 0x3B4:
- case 0x3BD:
- do_mrs (cpu);
- return;
- case 0x0B7:
- do_SYS (cpu); /* DC is an alias of SYS. */
- return;
- default:
- if (INSTR (21, 20) == 0x1)
- do_MSR_reg (cpu);
- else if (INSTR (21, 19) == 0 && INSTR (15, 12) == 0x4)
- do_MSR_immediate (cpu);
- else
- HALT_NYI;
- return;
- }
- }
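- /* Illustration: hand-worked encodings for the switch above (a sketch,
- derived from the ARMv8 encodings):
- nop = 0xd503201f -> INSTR (21, 12) == 0x032
- dsb sy = 0xd5033f9f -> INSTR (21, 12) == 0x033, op2 == 100
- mrs x0, tpidr_el0 = 0xd53bd040 -> INSTR (21, 12) == 0x3bd */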
- static void
- dexBr (sim_cpu *cpu)
- {
- /* uint32_t group = dispatchGroup (aarch64_get_instr (cpu));
- assert group == GROUP_BREXSYS_1010 || group == GROUP_BREXSYS_1011
- bits [31,29] of a BrExSys are the secondary dispatch vector. */
- uint32_t group2 = dispatchBrExSys (aarch64_get_instr (cpu));
- switch (group2)
- {
- case BR_IMM_000:
- return dexBranchImmediate (cpu);
- case BR_IMMCMP_001:
- /* Compare has bit 25 clear while test has it set. */
- if (!INSTR (25, 25))
- dexCompareBranchImmediate (cpu);
- else
- dexTestBranchImmediate (cpu);
- return;
- case BR_IMMCOND_010:
- /* This is a conditional branch if bit 25 is clear otherwise
- unallocated. */
- if (!INSTR (25, 25))
- dexCondBranchImmediate (cpu);
- else
- HALT_UNALLOC;
- return;
- case BR_UNALLOC_011:
- HALT_UNALLOC;
- case BR_IMM_100:
- dexBranchImmediate (cpu);
- return;
- case BR_IMMCMP_101:
- /* Compare has bit 25 clear while test has it set. */
- if (!INSTR (25, 25))
- dexCompareBranchImmediate (cpu);
- else
- dexTestBranchImmediate (cpu);
- return;
- case BR_REG_110:
- /* Unconditional branch reg has bit 25 set. */
- if (INSTR (25, 25))
- dexBranchRegister (cpu);
- /* This includes Excpn Gen, System and unallocated operations.
- We need to decode the Excpn Gen operation BRK so we can plant
- debugger entry points.
- Excpn Gen operations have instr [24] = 0.
- We also need to decode at least one of the System operations,
- NOP, which is an alias for HINT #0.
- System operations have instr [24,22] = 100. */
- else if (INSTR (24, 24) == 0)
- dexExcpnGen (cpu);
- else if (INSTR (24, 22) == 4)
- dexSystem (cpu);
- else
- HALT_UNALLOC;
- return;
- case BR_UNALLOC_111:
- HALT_UNALLOC;
- default:
- /* Should never reach here. */
- HALT_NYI;
- }
- }
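- /* Illustration: instr [31,29] values for some branch forms (a sketch,
- hand-worked from the ARMv8 encodings):
- b lab -> 000 (BR_IMM_000); bl lab -> 100 (BR_IMM_100)
- cbz w0, lab -> 001 with bit 25 clear; tbz x0, #3, lab -> bit 25 set
- b.eq lab -> 010 (BR_IMMCOND_010); br x0 -> 110 with bit 25 set */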
- static void
- aarch64_decode_and_execute (sim_cpu *cpu, uint64_t pc)
- {
- /* We need to check if gdb wants to break in here. */
- /* checkBreak (cpu); */
- uint64_t group = dispatchGroup (aarch64_get_instr (cpu));
- switch (group)
- {
- case GROUP_PSEUDO_0000: dexPseudo (cpu); break;
- case GROUP_LDST_0100: dexLdSt (cpu); break;
- case GROUP_DPREG_0101: dexDPReg (cpu); break;
- case GROUP_LDST_0110: dexLdSt (cpu); break;
- case GROUP_ADVSIMD_0111: dexAdvSIMD0 (cpu); break;
- case GROUP_DPIMM_1000: dexDPImm (cpu); break;
- case GROUP_DPIMM_1001: dexDPImm (cpu); break;
- case GROUP_BREXSYS_1010: dexBr (cpu); break;
- case GROUP_BREXSYS_1011: dexBr (cpu); break;
- case GROUP_LDST_1100: dexLdSt (cpu); break;
- case GROUP_DPREG_1101: dexDPReg (cpu); break;
- case GROUP_LDST_1110: dexLdSt (cpu); break;
- case GROUP_ADVSIMD_1111: dexAdvSIMD1 (cpu); break;
- case GROUP_UNALLOC_0001:
- case GROUP_UNALLOC_0010:
- case GROUP_UNALLOC_0011:
- HALT_UNALLOC;
- default:
- /* Should never reach here. */
- HALT_NYI;
- }
- }
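- /* Illustration: dispatchGroup appears to key on instr [28,25] (the
- group names above spell out those four bits). Hand-worked examples
- (a sketch):
- add w0, w0, #1 = 0x11000400 -> 1000 -> GROUP_DPIMM_1000
- ldr x0, [x1] = 0xf9400020 -> 1100 -> GROUP_LDST_1100
- b . = 0x14000000 -> 1010 -> GROUP_BREXSYS_1010 */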
- static bfd_boolean
- aarch64_step (sim_cpu *cpu)
- {
- uint64_t pc = aarch64_get_PC (cpu);
- if (pc == TOP_LEVEL_RETURN_PC)
- return FALSE;
- aarch64_set_next_PC (cpu, pc + 4);
- /* Code is always little-endian. */
- sim_core_read_buffer (CPU_STATE (cpu), cpu, read_map,
- & aarch64_get_instr (cpu), pc, 4);
- aarch64_get_instr (cpu) = endian_le2h_4 (aarch64_get_instr (cpu));
- TRACE_INSN (cpu, " pc = %" PRIx64 " instr = %08x", pc,
- aarch64_get_instr (cpu));
- TRACE_DISASM (cpu, pc);
- aarch64_decode_and_execute (cpu, pc);
- return TRUE;
- }
- void
- aarch64_run (SIM_DESC sd)
- {
- sim_cpu *cpu = STATE_CPU (sd, 0);
- while (aarch64_step (cpu))
- {
- aarch64_update_PC (cpu);
- if (sim_events_tick (sd))
- sim_events_process (sd);
- }
- sim_engine_halt (sd, cpu, NULL, aarch64_get_PC (cpu),
- sim_exited, aarch64_get_reg_s32 (cpu, R0, NO_SP));
- }
- void
- aarch64_init (sim_cpu *cpu, uint64_t pc)
- {
- uint64_t sp = aarch64_get_stack_start (cpu);
- /* Install SP, FP and PC and set LR to TOP_LEVEL_RETURN_PC (-20)
- so we can detect a top-level return. */
- aarch64_set_reg_u64 (cpu, SP, SP_OK, sp);
- aarch64_set_reg_u64 (cpu, FP, SP_OK, sp);
- aarch64_set_reg_u64 (cpu, LR, SP_OK, TOP_LEVEL_RETURN_PC);
- aarch64_set_next_PC (cpu, pc);
- aarch64_update_PC (cpu);
- aarch64_init_LIT_table ();
- }
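- /* Illustration (hypothetical host-side driver, a sketch): the entry
- points above are used roughly as
- aarch64_init (cpu, entry_point);
- aarch64_run (sd);
- with aarch64_run halting via sim_engine_halt once the PC reaches
- TOP_LEVEL_RETURN_PC. */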