/* aarch64-asm.c -- AArch64 assembler support.
   Copyright (C) 2012-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include <stdarg.h>
#include "libiberty.h"
#include "aarch64-asm.h"
#include "opintl.h"

/* Utilities.  */

/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero
   or the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. the <index> in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M; in such cases the fields should be passed in
   the order of M, L, H.  */

static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  num = va_arg (va, uint32_t);
  assert (num <= 5);
  while (num--)
    {
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}
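
/* Worked example (illustrative): encoding a lane index of 5 (0b101) into
   H:L:M via insert_fields (code, 5, 0, 3, FLD_M, FLD_L, FLD_H).  The first
   field named (FLD_M, width 1) receives bit 0 (= 1); VALUE is then shifted
   right by the field's width, so FLD_L receives 0 and FLD_H receives 1,
   giving H:L:M = 0b101 as required.  */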

/* Insert a raw field value VALUE into all fields in SELF->fields.
   The least significant bit goes in the final field.  */

static void
insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
                   aarch64_insn value)
{
  unsigned int i;
  enum aarch64_field_kind kind;

  for (i = ARRAY_SIZE (self->fields); i-- > 0; )
    if (self->fields[i] != FLD_NIL)
      {
        kind = self->fields[i];
        insert_field (kind, code, value, 0);
        value >>= fields[kind].width;
      }
}

/* Operand inserters.  */

/* Insert nothing.  */
bool
aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
                  aarch64_insn *code ATTRIBUTE_UNUSED,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return true;
}

/* Insert register number.  */
bool
aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reg.regno, 0);
  return true;
}

/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
bool
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code, const aarch64_inst *inst,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
          && inst->opcode->operands[0] == AARCH64_OPND_Ed)
        {
          /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
          assert (info->idx == 1);  /* Vn */
          aarch64_insn value = info->reglane.index << pos;
          insert_field (FLD_imm4, code, value, 0);
        }
      else
        {
          /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
             imm5<3:0>  <V>
               0000     RESERVED
               xxx1     B
               xx10     H
               x100     S
               1000     D  */
          aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
          insert_field (FLD_imm5, code, value, 0);
        }
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_4B:
        case AARCH64_OPND_QLF_S_2H:
          /* L:H */
          assert (reglane_index < 4);
          insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
          break;
        default:
          return false;
        }
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
        /* Complex operand takes two elements.  */
        reglane_index *= 2;

      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_H:
          /* H:L:M */
          assert (reglane_index < 8);
          insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_S:
          /* H:L */
          assert (reglane_index < 4);
          insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
          break;
        case AARCH64_OPND_QLF_S_D:
          /* H */
          assert (reglane_index < 2);
          insert_field (FLD_H, code, reglane_index, 0);
          break;
        default:
          return false;
        }
    }
  return true;
}
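
/* For instance (illustrative), MLA <Vd>.4S, <Vn>.4S, <Vm>.S[3] takes the
   final branch above: an S_S lane index of 3 is split across H:L, giving
   H = 1, L = 1.  For FCMLA by element the index is doubled first, since
   each complex element spans two real elements.  */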

/* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
bool
aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* R */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  /* len */
  insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
  return true;
}

/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  */
bool
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode */
  switch (num)
    {
    case 1:
      switch (info->reglist.num_regs)
        {
        case 1: value = 0x7; break;
        case 2: value = 0xa; break;
        case 3: value = 0x6; break;
        case 4: value = 0x2; break;
        default: return false;
        }
      break;
    case 2:
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      return false;
    }
  insert_field (FLD_opcode, code, value, 0);
  return true;
}
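
/* The VALUE table above follows the "opcode" column of the AdvSIMD
   load/store multiple structures encoding: e.g. (illustrative) LD1 with a
   single register uses opcode 0b0111, while LD4 {<Vt>.<T>-<Vt4>.<T>} uses
   opcode 0b0000.  */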

/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
   single structure to all lanes instructions.  */
bool
aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* S */
  value = (aarch64_insn) 0;
  if (is_ld1r && info->reglist.num_regs == 2)
    /* OP_LD1R does not have an alternating variant, but it does have
       a "two consecutive registers" form instead.  */
    value = (aarch64_insn) 1;
  insert_field (FLD_S, code, value, 0);
  return true;
}

/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand, e.g. Vt in AdvSIMD load/store single element instructions.  */
bool
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                           const aarch64_opnd_info *info, aarch64_insn *code,
                           const aarch64_inst *inst ATTRIBUTE_UNUSED,
                           aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;      /* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;    /* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q".  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      return false;
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);
  return true;
}
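
/* Worked example (illustrative): LD1 {<Vt>.H}[3] takes the S_H case, so
   QSsize = 3 << 1 = 0b0110; the insert_fields call then yields size = 0b10,
   S = 1 and Q = 0, i.e. Q:S:size<1> = 0b011 = 3 with size<0> clear, which
   is the architectural index encoding for H elements.  */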

/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
     SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
bool
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code, const aarch64_inst *inst,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
         immh   Q       <T>
         0000   x       SEE AdvSIMD modified immediate
         0001   0       8B
         0001   1       16B
         001x   0       4H
         001x   1       8H
         01xx   0       2S
         01xx   1       4S
         1xxx   0       RESERVED
         1xxx   1       2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
          || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh     <shift>
       0000     SEE AdvSIMD modified immediate
       0001     (16-UInt(immh:immb))
       001x     (32-UInt(immh:immb))
       01xx     (64-UInt(immh:immb))
       1xxx     (128-UInt(immh:immb))  */
    imm = (16 << (unsigned) val) - info->imm.value;
  else
    /* immh:immb
       immh     <shift>
       0000     SEE AdvSIMD modified immediate
       0001     (UInt(immh:immb)-8)
       001x     (UInt(immh:immb)-16)
       01xx     (UInt(immh:immb)-32)
       1xxx     (UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned) val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return true;
}
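
/* Worked example (illustrative): SSHR <Vd>.4S, <Vn>.4S, #3.  The 4S
   standard value is 0b101, so Q = 1 and val becomes 2; the right-shift
   branch computes imm = (16 << 2) - 3 = 61 = 0b0111101, which splits into
   immh = 0b0111 and immb = 0b101, and decoding gives 64 - 61 = 3 back.  */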

/* Insert fields for e.g. the immediate operands in
     BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
bool
aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
                 aarch64_insn *code,
                 const aarch64_inst *inst ATTRIBUTE_UNUSED,
                 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int64_t imm;

  imm = info->imm.value;
  if (operand_need_shift_by_two (self))
    imm >>= 2;
  if (operand_need_shift_by_four (self))
    imm >>= 4;
  insert_all_fields (self, code, imm);
  return true;
}

/* Insert immediate and its shift amount for e.g. the last operand in
     MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
bool
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
                      aarch64_insn *code, const aarch64_inst *inst,
                      aarch64_operand_error *errors)
{
  /* imm16 */
  aarch64_ins_imm (self, info, code, inst, errors);
  /* hw */
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
  return true;
}
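
/* E.g. (illustrative) MOVZ <Xd>, #0x1234, LSL #32 stores 0x1234 in imm16
   and 32 >> 4 = 2 in hw; the unshifted form likewise gives hw = 0.  */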

/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
     MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
bool
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
                                  const aarch64_opnd_info *info,
                                  aarch64_insn *code,
                                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                  aarch64_operand_error *errors
                                  ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
         or     MOVI <Vd>.2D, #<imm>.
         <imm> is a 64-bit immediate
         "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
         encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      assert ((int) imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return true;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
         encoding.  */
      if (esize == 1)
        return true;
      amount >>= 3;
      if (esize == 4)
        gen_sub_field (FLD_cmode, 1, 2, &field);  /* per word */
      else
        gen_sub_field (FLD_cmode, 1, 1, &field);  /* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);    /* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return true;
}
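
/* Worked example (illustrative): MOVI <Vd>.4S, #0x42, LSL #16.  Here esize
   is 4, so the LSL branch encodes amount >> 3 = 2 into the two-bit
   sub-field cmode<2:1>; the MSL branch would instead encode amount >> 4
   into the single bit cmode<0>.  */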

/* Insert fields for an 8-bit floating-point immediate.  */
bool
aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  return true;
}

/* Insert 1-bit rotation immediate (#90 or #270).  */
bool
aarch64_ins_imm_rotate1 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code, const aarch64_inst *inst,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t rot = (info->imm.value - 90) / 180;
  assert (rot < 2U);
  insert_field (self->fields[0], code, rot, inst->opcode->mask);
  return true;
}

/* Insert 2-bit rotation immediate (#0, #90, #180 or #270).  */
bool
aarch64_ins_imm_rotate2 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code, const aarch64_inst *inst,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t rot = info->imm.value / 90;
  assert (rot < 4U);
  insert_field (self->fields[0], code, rot, inst->opcode->mask);
  return true;
}
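
/* E.g. (illustrative) FCMLA with rotation #180 gives rot = 180 / 90 = 2 in
   the two-bit form, while FCADD with rotation #270 gives
   rot = (270 - 90) / 180 = 1 in the one-bit form.  */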

/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
   e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
bool
aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, 64 - info->imm.value, 0);
  return true;
}

/* Insert arithmetic immediate for e.g. the last operand in
     SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
bool
aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* shift */
  aarch64_insn value = info->shifter.amount ? 1 : 0;
  insert_field (self->fields[0], code, value, 0);
  /* imm12 (unsigned) */
  insert_field (self->fields[1], code, info->imm.value, 0);
  return true;
}

/* Common routine shared by aarch64_ins{,_inv}_limm.  INVERT_P says whether
   the operand should be inverted before encoding.  */
static bool
aarch64_ins_limm_1 (const aarch64_operand *self,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst, bool invert_p,
                    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  bool res;
  aarch64_insn value;
  uint64_t imm = info->imm.value;
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);

  if (invert_p)
    imm = ~imm;
  /* The constraint check should guarantee that this will work.  */
  res = aarch64_logical_immediate_p (imm, esize, &value);
  if (res)
    insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
                   self->fields[0]);
  return res;
}
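
/* E.g. (illustrative) ORR <Wd|WSP>, <Wn>, #0x0000ffff is a run of sixteen
   ones within a 32-bit element, so aarch64_logical_immediate_p yields the
   13-bit N:immr:imms value with N = 0, immr = 0 and imms = 0b001111; the
   three operand fields then receive it least-significant field first.  */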

/* Insert logical/bitmask immediate for e.g. the last operand in
     ORR <Wd|WSP>, <Wn>, #<imm>.  */
bool
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return aarch64_ins_limm_1 (self, info, code, inst,
                             inst->opcode->op == OP_BIC, errors);
}

/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.).  */
bool
aarch64_ins_inv_limm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst,
                      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return aarch64_ins_limm_1 (self, info, code, inst, true, errors);
}

/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
bool
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
                aarch64_insn *code, const aarch64_inst *inst,
                aarch64_operand_error *errors)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst, errors);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size */
      switch (info->qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_Q: value = 2; break;
        default: return false;
        }
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }
  return true;
}

/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
bool
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  return true;
}

/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bool
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;  /* Trick to enable the table-driven lookup.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]:
         S      <amount>
         0      [absent]
         1      #0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);
  return true;
}

/* Encode the address operand for e.g.
     STLUR <Xt>, [<Xn|SP>{, <amount>}].  */
bool
aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);

  /* simm9 */
  int imm = info->addr.offset.imm;
  insert_field (self->fields[1], code, imm, 0);

  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[2], code, 1, 0);
    }
  return true;
}

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
bool
aarch64_ins_addr_simm (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* Scaled immediate in ld/st pair instructions.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post-index */
  if (info->addr.writeback)
    {
      assert (inst->opcode->iclass != ldst_unscaled
              && inst->opcode->iclass != ldstnapair_offs
              && inst->opcode->iclass != ldstpair_off
              && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
        insert_field (self->fields[1], code, 1, 0);
    }
  return true;
}
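
/* E.g. (illustrative) LDP <Xt1>, <Xt2>, [<Xn|SP>, #16] uses an imm7 field
   scaled by the 8-byte element size, so 16 >> 3 = 2 is inserted; the
   unscaled simm9 forms insert the byte offset unchanged.  */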

/* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
bool
aarch64_ins_addr_simm10 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* simm10 */
  imm = info->addr.offset.imm >> 3;
  insert_field (self->fields[1], code, imm >> 9, 0);
  insert_field (self->fields[2], code, imm, 0);
  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[3], code, 1, 0);
    }
  return true;
}
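
/* Worked example (illustrative): LDRAA <Xt>, [<Xn|SP>, #-8] scales the
   offset by 8 to imm = -1; bit 9 of the two's-complement value (here 1)
   goes into the separate sign field and the low nine bits (0x1ff) into the
   contiguous part of simm10.  */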

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
bool
aarch64_ins_addr_uimm12 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* uimm12 */
  insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
  return true;
}

/* Encode the address operand for e.g.
     LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
bool
aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm | #<amount> */
  if (info->addr.offset.is_reg)
    insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  else
    insert_field (FLD_Rm, code, 0x1f, 0);
  return true;
}

/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
bool
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* cond */
  insert_field (FLD_cond, code, info->cond->value, 0);
  return true;
}

/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
bool
aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst,
                    aarch64_operand_error *detail ATTRIBUTE_UNUSED)
{
  /* If this is a system instruction, check whether there are any
     restrictions on which registers it may use.  */
  if (inst->opcode->iclass == ic_system)
    {
      uint64_t opcode_flags
        = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
      uint32_t sysreg_flags
        = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);

      /* Check whether the register is read-only, then whether it is
         write-only; if it is both or unspecified, we don't care.  */
      if (opcode_flags == F_SYS_READ
          && sysreg_flags
          && sysreg_flags != F_REG_READ)
        {
          detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
          detail->error = _("specified register cannot be read from");
          detail->index = info->idx;
          detail->non_fatal = true;
        }
      else if (opcode_flags == F_SYS_WRITE
               && sysreg_flags
               && sysreg_flags != F_REG_WRITE)
        {
          detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
          detail->error = _("specified register cannot be written to");
          detail->index = info->idx;
          detail->non_fatal = true;
        }
    }
  /* op0:op1:CRn:CRm:op2 */
  insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
  return true;
}

/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
bool
aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op1:op2 */
  insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
                 FLD_op2, FLD_op1);

  /* Extra CRm mask.  */
  if (info->sysreg.flags & F_REG_IN_CRM)
    insert_field (FLD_CRm, code, PSTATE_DECODE_CRM (info->sysreg.flags), 0);
  return true;
}

/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
bool
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op1:CRn:CRm:op2 */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
  return true;
}

/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
bool
aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
                     const aarch64_opnd_info *info, aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm */
  insert_field (FLD_CRm, code, info->barrier->value, 0);
  return true;
}

/* Encode the memory barrier option operand for DSB <option>nXS|#<imm>.  */
bool
aarch64_ins_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* For the DSB nXS barrier variant, the operand is a 5-bit unsigned
     immediate, encoded in CRm<3:2>.  */
  aarch64_insn value = (info->barrier->value >> 2) - 4;
  insert_field (FLD_CRm_dsb_nxs, code, value, 0);
  return true;
}

/* Encode the prefetch operation option operand for e.g.
     PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */
bool
aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
                   const aarch64_opnd_info *info, aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED,
                   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* prfop in Rt */
  insert_field (FLD_Rt, code, info->prfop->value, 0);
  return true;
}

/* Encode the hint number for instructions that alias HINT but take an
   operand.  */
bool
aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED,
                  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm:op2.  */
  insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
  return true;
}

/* Encode the extended register operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bool
aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  enum aarch64_modifier_kind kind;

  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* option */
  kind = info->shifter.kind;
  if (kind == AARCH64_MOD_LSL)
    kind = info->qualifier == AARCH64_OPND_QLF_W
      ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* imm3 */
  insert_field (FLD_imm3, code, info->shifter.amount, 0);
  return true;
}

/* Encode the shifted register operand for e.g.
     SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
bool
aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* shift */
  insert_field (FLD_shift, code,
                aarch64_get_operand_modifier_value (info->shifter.kind), 0);
  /* imm6 */
  insert_field (FLD_imm6, code, info->shifter.amount, 0);
  return true;
}

/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
   where <simm4> is a 4-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
bool
aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
   where <simm6> is a 6-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
bool
aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
   where <simm9> is a 9-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
   and imm3 fields, with imm3 being the less-significant part.  */
bool
aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_fields (code, info->addr.offset.imm / factor, 0,
                 2, FLD_imm3, FLD_SVE_imm6);
  return true;
}
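
/* E.g. (illustrative) a MUL VL offset of -3 with factor 1 is inserted as
   the 9-bit two's-complement value 0b111111101: imm3 receives the low
   three bits (0b101) and SVE_imm6 the upper six (0b111111).  */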

/* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
   is a 4-bit signed number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
   is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
   is SELF's operand-dependent value.  fields[0] specifies the base
   register field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  return true;
}

/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
   <shift> is SELF's operand-dependent value.  fields[0] specifies the
   base register field, fields[1] specifies the offset register field and
   fields[2] is a single-bit field that selects SXTW over UXTW.  */
bool
aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  if (info->shifter.kind == AARCH64_MOD_UXTW)
    insert_field (self->fields[2], code, 0, 0);
  else
    insert_field (self->fields[2], code, 1, 0);
  return true;
}

/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
   5-bit unsigned number and where <shift> is SELF's operand-dependent value.
   fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
  return true;
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.  */
static bool
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return true;
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
                             const aarch64_opnd_info *info, aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
                              const aarch64_opnd_info *info,
                              aarch64_insn *code,
                              const aarch64_inst *inst ATTRIBUTE_UNUSED,
                              aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
                              const aarch64_opnd_info *info,
                              aarch64_insn *code,
                              const aarch64_inst *inst ATTRIBUTE_UNUSED,
                              aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}

/* Encode an SVE ADD/SUB immediate.  */
bool
aarch64_ins_sve_aimm (const aarch64_operand *self,
                      const aarch64_opnd_info *info, aarch64_insn *code,
                      const aarch64_inst *inst ATTRIBUTE_UNUSED,
                      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    insert_all_fields (self, code, info->imm.value & 0xff);
  return true;
}

/* Encode an SVE CPY/DUP immediate.  */
bool
aarch64_ins_sve_asimm (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst,
                       aarch64_operand_error *errors)
{
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}

/* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
   array specifies which field to use for Zn.  MM is encoded in the
   concatenation of imm5 and SVE_tszh, with imm5 being the less
   significant part.  */
bool
aarch64_ins_sve_index (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
                 2, FLD_imm5, FLD_SVE_tszh);
  return true;
}
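
/* Worked example (illustrative): DUP <Zd>.S, <Zn>.S[1] has esize 4, so
   (1 * 2 + 1) * 4 = 12 = 0b0001100 is split as imm5 = 0b01100 and
   SVE_tszh = 0b00: the position of the lowest set bit encodes the element
   size and the bits above it hold the index.  */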

/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
bool
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst,
                          aarch64_operand_error *errors)
{
  return aarch64_ins_limm (self, info, code, inst, errors);
}

/* Encode Zn[MM], where Zn occupies the least-significant part of the field
   and where MM occupies the most-significant part.  The operand-dependent
   value specifies the number of bits in Zn.  */
bool
aarch64_ins_sve_quad_index (const aarch64_operand *self,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED,
                            aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int reg_bits = get_operand_specific_data (self);
  assert (info->reglane.regno < (1U << reg_bits));
  unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
  insert_all_fields (self, code, val);
  return true;
}

/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
bool
aarch64_ins_sve_reglist (const aarch64_operand *self,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return true;
}

/* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
   fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
   field.  */
bool
aarch64_ins_sve_scale (const aarch64_operand *self,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
  return true;
}

/* Encode an SVE shift left immediate.  */
bool
aarch64_ins_sve_shlimm (const aarch64_operand *self,
                        const aarch64_opnd_info *info, aarch64_insn *code,
                        const aarch64_inst *inst,
                        aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 8 * esize + info->imm.value);
  return true;
}

/* Encode an SVE shift right immediate.  */
bool
aarch64_ins_sve_shrimm (const aarch64_operand *self,
                        const aarch64_opnd_info *info, aarch64_insn *code,
                        const aarch64_inst *inst,
                        aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;
  unsigned int opnd_backshift = get_operand_specific_data (self);

  assert (info->idx >= (int) opnd_backshift);
  prev_operand = &inst->operands[info->idx - opnd_backshift];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 16 * esize - info->imm.value);
  return true;
}
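
/* E.g. (illustrative) LSR <Zd>.S, <Zn>.S, #5 takes its element size from
   the previous operand (esize 4), so the encoded value is
   16 * 4 - 5 = 59; decoding recovers the shift as 64 - 59 = 5.  Left
   shifts encode 8 * esize + shift instead.  */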
  1170. /* Encode a single-bit immediate that selects between #0.5 and #1.0.
  1171. The fields array specifies which field to use. */
  1172. bool
  1173. aarch64_ins_sve_float_half_one (const aarch64_operand *self,
  1174. const aarch64_opnd_info *info,
  1175. aarch64_insn *code,
  1176. const aarch64_inst *inst ATTRIBUTE_UNUSED,
  1177. aarch64_operand_error *errors ATTRIBUTE_UNUSED)
  1178. {
  1179. if (info->imm.value == 0x3f000000)
  1180. insert_field (self->fields[0], code, 0, 0);
  1181. else
  1182. insert_field (self->fields[0], code, 1, 0);
  1183. return true;
  1184. }
  1185. /* Encode a single-bit immediate that selects between #0.5 and #2.0.
  1186. The fields array specifies which field to use. */
  1187. bool
  1188. aarch64_ins_sve_float_half_two (const aarch64_operand *self,
  1189. const aarch64_opnd_info *info,
  1190. aarch64_insn *code,
  1191. const aarch64_inst *inst ATTRIBUTE_UNUSED,
  1192. aarch64_operand_error *errors ATTRIBUTE_UNUSED)
  1193. {
  1194. if (info->imm.value == 0x3f000000)
  1195. insert_field (self->fields[0], code, 0, 0);
  1196. else
  1197. insert_field (self->fields[0], code, 1, 0);
  1198. return true;
  1199. }

/* Encode a single-bit immediate that selects between #0.0 and #1.0.
   The fields array specifies which field to use.  */
bool
aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
                                const aarch64_opnd_info *info,
                                aarch64_insn *code,
                                const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return true;
}

/* Encode the ZA tile vector register number, vector indicator, vector
   selector and immediate in an SME instruction such as MOVA.  */
bool
aarch64_ins_sme_za_hv_tiles (const aarch64_operand *self,
                             const aarch64_opnd_info *info,
                             aarch64_insn *code,
                             const aarch64_inst *inst ATTRIBUTE_UNUSED,
                             aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_size;
  int fld_q;
  int fld_v = info->za_tile_vector.v;
  int fld_rv = info->za_tile_vector.index.regno - 12;
  int fld_zan_imm = info->za_tile_vector.index.imm;
  int regno = info->za_tile_vector.regno;

  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      fld_size = 0;
      fld_q = 0;
      break;
    case AARCH64_OPND_QLF_S_H:
      fld_size = 1;
      fld_q = 0;
      fld_zan_imm |= regno << 3;
      break;
    case AARCH64_OPND_QLF_S_S:
      fld_size = 2;
      fld_q = 0;
      fld_zan_imm |= regno << 2;
      break;
    case AARCH64_OPND_QLF_S_D:
      fld_size = 3;
      fld_q = 0;
      fld_zan_imm |= regno << 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      fld_size = 3;
      fld_q = 1;
      fld_zan_imm = regno;
      break;
    default:
      return false;
    }

  insert_field (self->fields[0], code, fld_size, 0);
  insert_field (self->fields[1], code, fld_q, 0);
  insert_field (self->fields[2], code, fld_v, 0);
  insert_field (self->fields[3], code, fld_rv, 0);
  insert_field (self->fields[4], code, fld_zan_imm, 0);
  return true;
}
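
/* Worked example for the mapping above (illustrative only): for a
   32-bit tile vector such as ZA3H.S[W13, 2], the encoder produces
   fld_size = 2, fld_q = 0, fld_v = 0, fld_rv = 13 - 12 = 1 and
   fld_zan_imm = 2 | (3 << 2) = 0xe; the tile number sits above the
   slice index in the shared immediate field, with fewer index bits as
   the element size grows.  */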

/* Encode an SME ZERO instruction's list of up to eight 64-bit element
   tile names, separated by commas, in the "imm8" field.

   For programmer convenience an assembler must also accept the names of
   32-bit, 16-bit and 8-bit element tiles, which are converted into the
   corresponding set of 64-bit element tiles.  */
bool
aarch64_ins_sme_za_list (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED,
                         aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_mask = info->imm.value;

  insert_field (self->fields[0], code, fld_mask, 0);
  return true;
}

bool
aarch64_ins_sme_za_array (const aarch64_operand *self,
                          const aarch64_opnd_info *info,
                          aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED,
                          aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int regno = info->za_tile_vector.index.regno - 12;
  int imm = info->za_tile_vector.index.imm;

  insert_field (self->fields[0], code, regno, 0);
  insert_field (self->fields[1], code, imm, 0);
  return true;
}

bool
aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand *self,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code,
                               const aarch64_inst *inst ATTRIBUTE_UNUSED,
                               aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int regno = info->addr.base_regno;
  int imm = info->addr.offset.imm;

  insert_field (self->fields[0], code, regno, 0);
  insert_field (self->fields[1], code, imm, 0);
  return true;
}

/* Encode the {SM | ZA} mode operand of SMSTART and SMSTOP.  */
bool
aarch64_ins_sme_sm_za (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn fld_crm;

  /* Set CRm[3:1] bits.  */
  if (info->reg.regno == 's')
    fld_crm = 0x02; /* SVCRSM.  */
  else if (info->reg.regno == 'z')
    fld_crm = 0x04; /* SVCRZA.  */
  else
    return false;

  insert_field (self->fields[0], code, fld_crm, 0);
  return true;
}
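
/* Note on the comparisons above: for this operand the parser stores the
   mode letter itself ('s' for SM, 'z' for ZA) in info->reg.regno rather
   than a register number, so the checks are against character constants;
   SMSTART SM therefore encodes CRm[3:1] = 0x2 and SMSTART ZA encodes
   CRm[3:1] = 0x4.  */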

/* Encode the source scalable predicate register (Pn), the name of the
   index base register W12-W15 (Rm), and the optional element index,
   defaulting to 0, in the range 0 to one less than the number of vector
   elements in a 128-bit vector register, encoded in "i1:tszh:tszl".  */
bool
aarch64_ins_sme_pred_reg_with_index (const aarch64_operand *self,
                                     const aarch64_opnd_info *info,
                                     aarch64_insn *code,
                                     const aarch64_inst *inst ATTRIBUTE_UNUSED,
                                     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_pn = info->za_tile_vector.regno;
  int fld_rm = info->za_tile_vector.index.regno - 12;
  int imm = info->za_tile_vector.index.imm;
  int fld_i1, fld_tszh, fld_tszl;

  insert_field (self->fields[0], code, fld_rm, 0);
  insert_field (self->fields[1], code, fld_pn, 0);

  /* Optional element index, defaulting to 0, in the range 0 to one less
     than the number of vector elements in a 128-bit vector register,
     encoded in "i1:tszh:tszl".

       i1  tszh  tszl  <T>
       0   0     000   RESERVED
       x   x     xx1   B
       x   x     x10   H
       x   x     100   S
       x   1     000   D  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* <imm> is a 4-bit value.  */
      fld_i1 = (imm >> 3) & 0x1;
      fld_tszh = (imm >> 2) & 0x1;
      fld_tszl = ((imm << 1) | 0x1) & 0x7;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* <imm> is a 3-bit value.  */
      fld_i1 = (imm >> 2) & 0x1;
      fld_tszh = (imm >> 1) & 0x1;
      fld_tszl = ((imm << 2) | 0x2) & 0x7;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* <imm> is a 2-bit value.  */
      fld_i1 = (imm >> 1) & 0x1;
      fld_tszh = imm & 0x1;
      fld_tszl = 0x4;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* <imm> is a 1-bit value.  */
      fld_i1 = imm & 0x1;
      fld_tszh = 0x1;
      fld_tszl = 0x0;
      break;
    default:
      return false;
    }

  insert_field (self->fields[2], code, fld_i1, 0);
  insert_field (self->fields[3], code, fld_tszh, 0);
  insert_field (self->fields[4], code, fld_tszl, 0);
  return true;
}
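
/* Worked example for the index packing above (derived from the table):
   for a .H element with <imm> = 5 (0b101), the encoder emits
   i1 = (5 >> 2) & 1 = 1, tszh = (5 >> 1) & 1 = 0 and
   tszl = ((5 << 2) | 0x2) & 0x7 = 0b110, so the low tszl bits keep the
   "10" H marker while the remaining bit positions carry the index.  */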

/* Insert X0-X30.  Register 31 is unallocated.  */
bool
aarch64_ins_x0_to_x30 (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED,
                       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  assert (info->reg.regno <= 30);
  insert_field (self->fields[0], code, info->reg.regno, 0);
  return true;
}

/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_NIL;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      return;
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
          || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTXN <Vb><d>, <Va><n>.  */
static void
encode_asisd_fcvtxn (aarch64_inst *inst)
{
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};

  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);
}

/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  const aarch64_field field = {15, 2};

  /* opc dstsize */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: abort ();
    }
  insert_field_2 (&field, &inst->value, val, 0);
}
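
/* Illustration of the 'opc' values above: FCVT <Dd>, <Sn> has a
   double-precision destination, so val = 1 is inserted into the 2-bit
   field at bit 15; a half-precision destination would instead give
   val = 3.  */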

/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid.  */
static int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      for (i = 0; i < nops; ++i)
        if (inst->opcode->qualifiers_list[variant][i]
            != inst->operands[i].qualifier)
          break;
      if (i == nops)
        return variant;
    }
  abort ();
}

/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */
static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
                     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default:
      break;
    }
}

/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
               aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
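
/* Worked example for the size/Q split above (illustrative): the
   standard qualifier value for a .4S arrangement is 0b101, so Q
   receives the low bit (1) and size receives the upper two bits (0b10),
   matching the usual AdvSIMD size:Q encoding of 4S.  */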

/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not just any operand; it is one of the operands that carries
   enough information for such an encoding.  */
static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
        ? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
        insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
        ? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
        {
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_H: value = 3; break;
        default: return;
        }
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
              && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      int num;	/* Number of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_SIMD_REG
              && qualifier >= AARCH64_OPND_QLF_V_8B
              && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>  q  <t>
         0000       x  reserved
         xxx1       0  8b
         xxx1       1  16b
         xx10       0  4h
         xx10       1  8h
         x100       0  2s
         x100       1  4s
         1000       0  reserved
         1000       1  2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
         STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
        /* Otherwise use the result operand, which has to be an integer
           register.  */
        idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
              == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
                    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
                      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }

  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}

/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */
static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  int variant = 0;

  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
                     0, 2, FLD_SVE_M_14, FLD_size);
      break;
    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
    case sve_shift_tsz_hsd:
    case sve_shift_tsz_bhsd:
      /* For indices and shift amounts, the variant is encoded as
         part of the immediate.  */
      break;
    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
         and depend on the immediate.  They don't have a separate
         encoding.  */
      break;
    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;
    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
                     0, 2, FLD_SVE_M_16, FLD_size);
      break;
    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;
    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;
    case sve_size_hsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;
    case sve_size_bh:
    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;
    case sve_size_sd2:
      insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
      break;
    case sve_size_hsd2:
      insert_field (FLD_SVE_size, &inst->value,
                    aarch64_get_variant (inst) + 1, 0);
      break;
    case sve_size_tsz_bhs:
      insert_fields (&inst->value,
                     (1 << aarch64_get_variant (inst)),
                     0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
      break;
    case sve_size_13:
      variant = aarch64_get_variant (inst) + 1;
      if (variant == 2)
        variant = 3;
      insert_field (FLD_size, &inst->value, variant, 0);
      break;
    default:
      break;
    }
}

/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Wd>, <Ws>, #<shift>
     is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}

/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
     is equivalent to:
   USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static void
convert_xtl_to_shll (aarch64_inst *inst)
{
  inst->operands[2].qualifier = inst->operands[1].qualifier;
  inst->operands[2].imm.value = 0;
}

/* Convert
     LSR <Xd>, <Xn>, #<shift>
   to
     UBFM <Xd>, <Xn>, #<shift>, #63.  */
static void
convert_sr_to_bfm (aarch64_inst *inst)
{
  inst->operands[3].imm.value =
    inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
}
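
/* For example, LSR X0, X1, #4 becomes UBFM X0, X1, #4, #63, and the
   32-bit form LSR W0, W1, #4 becomes UBFM W0, W1, #4, #31, since the
   W form uses the imm_0_31 qualifier.  */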

/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
       is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  copy_operand_info (inst, 2, 1);
}

/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
static void
convert_bfx_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  inst->operands[2].imm.value = lsb;
  inst->operands[3].imm.value = lsb + width - 1;
}
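
/* For example, SBFX X0, X1, #8, #16 becomes SBFM X0, X1, #8, #23:
   immr stays at the lsb and imms becomes lsb + width - 1.  */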

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
static void
convert_bfi_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}
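
/* For example, SBFIZ W0, W1, #4, #8 becomes SBFM W0, W1, #28, #7,
   since (32 - 4) & 0x1f = 28 and 8 - 1 = 7.  */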

/* The instruction written:
     BFC <Xd>, #<lsb>, #<width>
   is equivalent to:
     BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */
static void
convert_bfc_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Insert XZR.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;

  /* Convert the immediate operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
static void
convert_lsl_to_ubfm (aarch64_inst *inst)
{
  int64_t shift = inst->operands[2].imm.value;

  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - shift) & 0x1f;
      inst->operands[3].imm.value = 31 - shift;
    }
  else
    {
      inst->operands[2].imm.value = (64 - shift) & 0x3f;
      inst->operands[3].imm.value = 63 - shift;
    }
}
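
/* For example, LSL W0, W1, #3 becomes UBFM W0, W1, #29, #28, since
   (32 - 3) & 0x1f = 29 and 31 - 3 = 28.  */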

/* CINC <Wd>, <Wn>, <cond>
     is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */
static void
convert_to_csel (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}

/* CSET <Wd>, <cond>
     is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */
static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
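
/* For example, CSET W0, EQ becomes CSINC W0, WZR, WZR, NE: both source
   registers are forced to WZR (0x1f) and the condition is inverted.  */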

/* MOV <Wd>, #<imm>
     is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */
static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value = ~(uint64_t) 0;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      value = ~inst->operands[1].imm.value;
      break;
    default:
      return;
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    return;
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}
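
/* For example, MOV X0, #0x20000 becomes MOVZ X0, #0x2, LSL #16:
   aarch64_wide_constant_p reports shift_amount = 16 and the remaining
   16-bit payload is 0x20000 >> 16 = 0x2.  */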

/* MOV <Wd>, #<imm>
     is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */
static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}

/* Some alias opcodes are assembled by being converted to their real form.  */
static void
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
{
  const aarch64_opcode *alias = inst->opcode;

  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      convert_sr_to_bfm (inst);
      break;
    case OP_LSL_IMM:
      convert_lsl_to_ubfm (inst);
      break;
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      convert_to_csel (inst);
      break;
    case OP_CSET:
    case OP_CSETM:
      convert_cset_to_csinc (inst);
      break;
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      convert_bfx_to_bfm (inst);
      break;
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      convert_bfi_to_bfm (inst);
      break;
    case OP_BFC:
      convert_bfc_to_bfm (inst);
      break;
    case OP_MOV_V:
      convert_mov_to_orr (inst);
      break;
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
      break;
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      break;
    case OP_ROR_IMM:
      convert_ror_to_extr (inst);
      break;
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      convert_xtl_to_shll (inst);
      break;
    default:
      break;
    }

 convert_to_real_return:
  aarch64_replace_opcode (inst, real);
}

/* Encode *INST_ORI of the opcode OPCODE.
   Return the encoded result in *CODE and, if QLF_SEQ is not NULL, return
   the matched operand qualifier sequence in *QLF_SEQ.  */
bool
aarch64_opcode_encode (const aarch64_opcode *opcode,
                       const aarch64_inst *inst_ori, aarch64_insn *code,
                       aarch64_opnd_qualifier_t *qlf_seq,
                       aarch64_operand_error *mismatch_detail,
                       aarch64_instr_sequence *insn_sequence)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return false;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
        *qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of the real
     opcode and the encoding will be carried out using the rules for the
     aliased opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
                   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
        break;
      if (info->skip)
        {
          DEBUG_TRACE ("skip the incomplete operand %d", i);
          continue;
        }
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd)
          && !aarch64_insert_operand (opnd, info, &inst->value, inst,
                                      mismatch_detail))
        return false;
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

  /* Run a verifier if the instruction has one set.  */
  if (opcode->verifier)
    {
      enum err_type result = opcode->verifier (inst, *code, 0, true,
                                               mismatch_detail, insn_sequence);
      switch (result)
        {
        case ERR_UND:
        case ERR_UNP:
        case ERR_NYI:
          return false;
        default:
          break;
        }
    }

  /* Always run the constraint verifiers; this is needed because
     constraints maintain a global state, regardless of whether the
     instruction has the corresponding flag set or not.  */
  enum err_type result = verify_constraints (inst, *code, 0, true,
                                             mismatch_detail, insn_sequence);
  switch (result)
    {
    case ERR_UND:
    case ERR_UNP:
    case ERR_NYI:
      return false;
    default:
      break;
    }

 encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;
  return true;
}
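
/* A minimal usage sketch of the encoder entry point above (illustrative
   only; `parsed_inst', `insn_sequence' and the surrounding error
   handling are stand-ins for whatever state the caller, e.g. the
   assembler, already maintains):

     aarch64_insn code;
     aarch64_operand_error error;
     if (aarch64_opcode_encode (parsed_inst.opcode, &parsed_inst, &code,
                                NULL, &error, insn_sequence))
       ... emit the 32-bit word CODE ...
     else
       ... report ERROR ...

   Passing NULL for QLF_SEQ simply skips returning the matched qualifier
   sequence.  */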