  1. # e500 core instructions, for PSIM, the PowerPC simulator.
  2. # Copyright 2003-2022 Free Software Foundation, Inc.
  3. # Contributed by Red Hat Inc; developed under contract from Motorola.
  4. # Written by matthew green <mrg@redhat.com>.
  5. # This file is part of GDB.
  6. # This program is free software; you can redistribute it and/or modify
  7. # it under the terms of the GNU General Public License as published by
  8. # the Free Software Foundation; either version 3 of the License, or
  9. # (at your option) any later version.
  10. # This program is distributed in the hope that it will be useful,
  11. # but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. # GNU General Public License for more details.
  14. # You should have received a copy of the GNU General Public License
  15. # along with this program. If not, see <http://www.gnu.org/licenses/>.
  16. #
  17. # e500 Core Complex Instructions
  18. #
  19. :cache:e500::signed_word *:rAh:RA:(cpu_registers(processor)->e500.gprh + RA)
  20. :cache:e500::signed_word *:rSh:RS:(cpu_registers(processor)->e500.gprh + RS)
  21. :cache:e500::signed_word *:rBh:RB:(cpu_registers(processor)->e500.gprh + RB)
  22. # Flags for model.h
  23. ::model-macro:::
  24. #define PPC_INSN_INT_SPR(OUT_MASK, IN_MASK, SPR) \
  25. do { \
  26. if (CURRENT_MODEL_ISSUE > 0) \
  27. ppc_insn_int_spr(MY_INDEX, cpu_model(processor), OUT_MASK, IN_MASK, SPR); \
  28. } while (0)
  29. # Schedule an instruction that takes two integer registers and produces a special purpose output register plus an integer output register
  30. void::model-function::ppc_insn_int_spr:itable_index index, model_data *model_ptr, const uint32_t out_mask, const uint32_t in_mask, const unsigned nSPR
  31. const uint32_t int_mask = out_mask | in_mask;
  32. model_busy *busy_ptr;
  33. while ((model_ptr->int_busy & int_mask) != 0 || model_ptr->spr_busy[nSPR] != 0) {
  34. if (WITH_TRACE && ppc_trace[trace_model])
  35. model_trace_busy_p(model_ptr, int_mask, 0, 0, nSPR);
  36. model_ptr->nr_stalls_data++;
  37. model_new_cycle(model_ptr);
  38. }
  39. busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
  40. busy_ptr->int_busy |= out_mask;
  41. model_ptr->int_busy |= out_mask;
  42. busy_ptr->spr_busy = nSPR;
  43. model_ptr->spr_busy[nSPR] = 1;
  44. busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_mask)) ? 3 : 2;
  45. TRACE(trace_model,("Making register %s busy.\n", spr_name(nSPR)));
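#
# For reference, a semantic body that writes an integer register and the
# SPEFSCR invokes the macro above as, for example,
#   PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
# which (when CURRENT_MODEL_ISSUE > 0) stalls while any of the named integer
# registers or the SPR is busy, then marks the output register and the SPR
# busy for writeback.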
  46. #
  47. # SPE Modulo Fractional Multiplication handling support
  48. #
  49. :function:e500::uint64_t:ev_multiply16_smf:int16_t a, int16_t b, int *sat
  50. int32_t a32 = a, b32 = b, rv32;
  51. rv32 = a32 * b32;
  52. *sat = (rv32 & (3<<30)) == (3<<30);
  53. return (int64_t)rv32 << 1;
  54. :function:e500::uint64_t:ev_multiply32_smf:int32_t a, int32_t b, int *sat
  55. int64_t rv64, a64 = a, b64 = b;
  56. rv64 = a64 * b64;
  57. *sat = (rv64 & ((int64_t)3<<62)) == ((int64_t)3<<62);
  58. /* Loses top sign bit. */
  59. return rv64 << 1;
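#
# Worked example of the modulo-fractional multiply above: in signed 32-bit
# fractional form 0x40000000 is 0.5, so ev_multiply32_smf (0x40000000,
# 0x40000000, &sat) forms the raw product 0x1000000000000000 and the final
# << 1 gives 0x2000000000000000, i.e. 0.25 as a 64-bit fraction; *sat stays
# clear because the top two bits of the raw product are not both set.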
  60. #
  61. # SPE Saturation handling support
  62. #
  63. :function:e500::int32_t:ev_multiply16_ssf:int16_t a, int16_t b, int *sat
  64. int32_t rv32;
  65. if (a == 0xffff8000 && b == 0xffff8000)
  66. {
  67. rv32 = 0x7fffffffL;
  68. * sat = 1;
  69. return rv32;
  70. }
  71. else
  72. {
  73. int32_t a32 = a, b32 = b;
  74. rv32 = a32 * b32;
  75. * sat = (rv32 & (3<<30)) == (3<<30);
  76. return (int64_t)rv32 << 1;
  77. }
  78. :function:e500::int64_t:ev_multiply32_ssf:int32_t a, int32_t b, int *sat
  79. int64_t rv64;
  80. if (a == 0x80000000 && b == 0x80000000)
  81. {
  82. rv64 = 0x7fffffffffffffffLL;
  83. * sat = 1;
  84. return rv64;
  85. }
  86. else
  87. {
  88. int64_t a64 = a, b64 = b;
  89. rv64 = a64 * b64;
  90. *sat = (rv64 & ((int64_t)3<<62)) == ((int64_t)3<<62);
  91. /* Loses top sign bit. */
  92. return rv64 << 1;
  93. }
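#
# The special cases above exist because -1.0 * -1.0 = +1.0 is not
# representable as a signed fraction: with both 16-bit operands equal to
# 0x8000 (-1.0), the generic path would return 0x80000000 (-1.0) after the
# << 1, so the result is instead pinned to 0x7fffffff, the largest positive
# fraction, and *sat is set.  The 32-bit routine treats
# 0x80000000 * 0x80000000 the same way.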
  94. #
  95. # SPE FP handling support
  96. #
  97. :function:e500::void:ev_check_guard:sim_fpu *a, int fg, int fx, cpu *processor
  98. uint64_t guard;
  99. guard = sim_fpu_guard(a, 0);
  100. if (guard & 1)
  101. EV_SET_SPEFSCR_BITS(fg);
  102. if (guard & ~1)
  103. EV_SET_SPEFSCR_BITS(fx);
  104. :function:e500::void:booke_sim_fpu_32to:sim_fpu *dst, uint32_t packed
  105. sim_fpu_32to (dst, packed);
  106. /* Set normally unused fields to allow booke arithmetic. */
  107. if (dst->class == sim_fpu_class_infinity)
  108. {
  109. dst->normal_exp = 128;
  110. dst->fraction = ((uint64_t)1 << 60);
  111. }
  112. else if (dst->class == sim_fpu_class_qnan
  113. || dst->class == sim_fpu_class_snan)
  114. {
  115. dst->normal_exp = 128;
  116. /* The fraction is set, but without the implicit bit, so we have to OR
  117. in the implicit bit. */
  118. dst->fraction |= ((uint64_t)1 << 60);
  119. }
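#
# For example, unpacking +Inf (0x7f800000) through booke_sim_fpu_32to leaves
# the class set to infinity but gives it normal_exp = 128 and a fraction with
# the implicit bit set, so the booke helpers below can treat it as an
# ordinary, very large magnitude once they reclassify it as a number.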
  120. :function:e500::int:booke_sim_fpu_add:sim_fpu *d, sim_fpu *a, sim_fpu *b, int inv, int over, int under, cpu *processor
  121. int invalid_operand, overflow_result, underflow_result;
  122. int dest_exp;
  123. invalid_operand = 0;
  124. overflow_result = 0;
  125. underflow_result = 0;
  126. /* Treat NaN, Inf, and denorm like normal numbers, and signal invalid
  127. operand if it hasn't already been done. */
  128. if (EV_IS_INFDENORMNAN (a))
  129. {
  130. a->class = sim_fpu_class_number;
  131. EV_SET_SPEFSCR_BITS (inv);
  132. invalid_operand = 1;
  133. }
  134. if (EV_IS_INFDENORMNAN (b))
  135. {
  136. b->class = sim_fpu_class_number;
  137. if (! invalid_operand)
  138. {
  139. EV_SET_SPEFSCR_BITS (inv);
  140. invalid_operand = 1;
  141. }
  142. }
  143. sim_fpu_add (d, a, b);
  144. dest_exp = booke_sim_fpu_exp (d);
  145. /* If this is a denorm, force to zero, and signal underflow if
  146. we haven't already indicated invalid operand. */
  147. if (dest_exp <= -127)
  148. {
  149. int sign = d->sign;
  150. *d = sim_fpu_zero;
  151. d->sign = sign;
  152. if (! invalid_operand)
  153. {
  154. EV_SET_SPEFSCR_BITS (under);
  155. underflow_result = 1;
  156. }
  157. }
  158. /* If this is Inf/NaN, force to pmax/nmax, and signal overflow if
  159. we haven't already indicated invalid operand. */
  160. else if (dest_exp >= 127)
  161. {
  162. int sign = d->sign;
  163. *d = sim_fpu_max32;
  164. d->sign = sign;
  165. if (! invalid_operand)
  166. {
  167. EV_SET_SPEFSCR_BITS (over);
  168. overflow_result = 1;
  169. }
  170. }
  171. /* Destination sign is sign of operand with larger magnitude, or
  172. the sign of the first operand if operands have the same
  173. magnitude. Thus if the result is zero, we force it to have
  174. the sign of the first operand. */
  175. else if (d->fraction == 0)
  176. d->sign = a->sign;
  177. return invalid_operand || overflow_result || underflow_result;
  178. :function:e500::uint32_t:ev_fs_add:uint32_t aa, uint32_t bb, int inv, int over, int under, int fg, int fx, cpu *processor
  179. sim_fpu a, b, d;
  180. uint32_t w;
  181. int exception;
  182. booke_sim_fpu_32to (&a, aa);
  183. booke_sim_fpu_32to (&b, bb);
  184. exception = booke_sim_fpu_add (&d, &a, &b, inv, over, under,
  185. processor);
  186. sim_fpu_to32 (&w, &d);
  187. if (! exception)
  188. ev_check_guard(&d, fg, fx, processor);
  189. return w;
  190. :function:e500::uint32_t:ev_fs_sub:uint32_t aa, uint32_t bb, int inv, int over, int under, int fg, int fx, cpu *processor
  191. sim_fpu a, b, d;
  192. uint32_t w;
  193. int exception;
  194. booke_sim_fpu_32to (&a, aa);
  195. booke_sim_fpu_32to (&b, bb);
  196. /* Invert sign of second operand, and add. */
  197. b.sign = ! b.sign;
  198. exception = booke_sim_fpu_add (&d, &a, &b, inv, over, under,
  199. processor);
  200. sim_fpu_to32 (&w, &d);
  201. if (! exception)
  202. ev_check_guard(&d, fg, fx, processor);
  203. return w;
  204. # sim_fpu_exp leaves the normal_exp field undefined for Inf and NaN.
  205. # The booke algorithms require exp values, so we fake them here.
  206. # fixme: It also apparently does the same for zero, but should not.
  207. :function:e500::uint32_t:booke_sim_fpu_exp:sim_fpu *x
  208. int y = sim_fpu_is (x);
  209. if (y == SIM_FPU_IS_PZERO || y == SIM_FPU_IS_NZERO)
  210. return 0;
  211. else if (y == SIM_FPU_IS_SNAN || y == SIM_FPU_IS_QNAN
  212. || y == SIM_FPU_IS_NINF || y == SIM_FPU_IS_PINF)
  213. return 128;
  214. else
  215. return sim_fpu_exp (x);
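#
# The value 128 matches the unbiased form of the all-ones exponent field
# (0xff - 127) carried by single-precision Inf and NaN, so the exponent
# estimates in ev_fs_mul and ev_fs_div below see Inf/NaN as larger than any
# finite exponent.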
  216. :function:e500::uint32_t:ev_fs_mul:uint32_t aa, uint32_t bb, int inv, int over, int under, int fg, int fx, cpu *processor
  217. sim_fpu a, b, d;
  218. uint32_t w;
  219. int sa, sb, ea, eb, ei;
  220. sim_fpu_32to (&a, aa);
  221. sim_fpu_32to (&b, bb);
  222. sa = sim_fpu_sign(&a);
  223. sb = sim_fpu_sign(&b);
  224. ea = booke_sim_fpu_exp(&a);
  225. eb = booke_sim_fpu_exp(&b);
  226. ei = ea + eb + 127;
  227. if (sim_fpu_is_zero (&a) || sim_fpu_is_zero (&b))
  228. w = 0;
  229. else if (sa == sb) {
  230. if (ei >= 254) {
  231. w = EV_PMAX;
  232. EV_SET_SPEFSCR_BITS(over);
  233. } else if (ei < 1) {
  234. d = sim_fpu_zero;
  235. sim_fpu_to32 (&w, &d);
  236. w &= 0x7fffffff; /* Clear sign bit. */
  237. } else {
  238. goto normal_mul;
  239. }
  240. } else {
  241. if (ei >= 254) {
  242. w = EV_NMAX;
  243. EV_SET_SPEFSCR_BITS(over);
  244. } else if (ei < 1) {
  245. d = sim_fpu_zero;
  246. sim_fpu_to32 (&w, &d);
  247. w |= 0x80000000; /* Set sign bit. */
  248. } else {
  249. normal_mul:
  250. if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
  251. EV_SET_SPEFSCR_BITS(inv);
  252. sim_fpu_mul (&d, &a, &b);
  253. sim_fpu_to32 (&w, &d);
  254. }
  255. }
  256. return w;
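#
# The ei tests above are a quick range estimate: ea and eb are unbiased
# exponents, so ea + eb + 127 approximates the biased exponent of the
# product.  An estimate of 254 or more saturates to EV_PMAX or EV_NMAX
# (by result sign) with the overflow bit set, and an estimate below 1 is
# flushed to a signed zero, before any actual multiplication is attempted.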
  257. :function:e500::uint32_t:ev_fs_div:uint32_t aa, uint32_t bb, int inv, int over, int under, int dbz, int fg, int fx, cpu *processor
  258. sim_fpu a, b, d;
  259. uint32_t w;
  260. int sa, sb, ea, eb, ei;
  261. sim_fpu_32to (&a, aa);
  262. sim_fpu_32to (&b, bb);
  263. sa = sim_fpu_sign(&a);
  264. sb = sim_fpu_sign(&b);
  265. ea = booke_sim_fpu_exp(&a);
  266. eb = booke_sim_fpu_exp(&b);
  267. ei = ea - eb + 127;
  268. /* Special cases to handle behaviour of e500 hardware.
  269. cf case 107543. */
  270. if (sim_fpu_is_nan (&a) || sim_fpu_is_nan (&b)
  271. || sim_fpu_is_zero (&a) || sim_fpu_is_zero (&b))
  272. {
  273. if (sim_fpu_is_snan (&a) || sim_fpu_is_snan (&b))
  274. {
  275. if (bb == 0x3f800000)
  276. w = EV_PMAX;
  277. else if (aa == 0x7fc00001)
  278. w = 0x3fbffffe;
  279. else
  280. goto normal_div;
  281. }
  282. else
  283. goto normal_div;
  284. }
  285. else if (sim_fpu_is_infinity (&a) && sim_fpu_is_infinity (&b))
  286. {
  287. if (sa == sb)
  288. sim_fpu_32to (&d, 0x3f800000);
  289. else
  290. sim_fpu_32to (&d, 0xbf800000);
  291. sim_fpu_to32 (&w, &d);
  292. }
  293. else if (sa == sb) {
  294. if (ei > 254) {
  295. w = EV_PMAX;
  296. EV_SET_SPEFSCR_BITS(over);
  297. } else if (ei <= 1) {
  298. d = sim_fpu_zero;
  299. sim_fpu_to32 (&w, &d);
  300. w &= 0x7fffffff; /* Clear sign bit. */
  301. } else {
  302. goto normal_div;
  303. }
  304. } else {
  305. if (ei > 254) {
  306. w = EV_NMAX;
  307. EV_SET_SPEFSCR_BITS(over);
  308. } else if (ei <= 1) {
  309. d = sim_fpu_zero;
  310. sim_fpu_to32 (&w, &d);
  311. w |= 0x80000000; /* Set sign bit. */
  312. } else {
  313. normal_div:
  314. if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
  315. EV_SET_SPEFSCR_BITS(inv);
  316. if (sim_fpu_is_zero (&b))
  317. {
  318. if (sim_fpu_is_zero (&a))
  319. EV_SET_SPEFSCR_BITS(dbz);
  320. else
  321. EV_SET_SPEFSCR_BITS(inv);
  322. w = sa ? EV_NMAX : EV_PMAX;
  323. }
  324. else
  325. {
  326. sim_fpu_div (&d, &a, &b);
  327. sim_fpu_to32 (&w, &d);
  328. ev_check_guard(&d, fg, fx, processor);
  329. }
  330. }
  331. }
  332. return w;
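#
# ev_fs_div uses the same style of estimate with ea - eb + 127, but with the
# bounds shifted by one (> 254 overflows, <= 1 underflows); on the normal
# path a zero divisor is handled specially (0/0 and x/0 set different
# SPEFSCR bits and produce EV_NMAX/EV_PMAX) instead of calling sim_fpu_div.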
  333. #
  334. # A.2.7 Integer SPE Simple Instructions
  335. #
  336. 0.4,6.RS,11.RA,16.RB,21.512:X:e500:evaddw %RS,%RA,%RB:Vector Add Word
  337. uint32_t w1, w2;
  338. w1 = *rBh + *rAh;
  339. w2 = *rB + *rA;
  340. EV_SET_REG2(*rSh, *rS, w1, w2);
  341. //printf("evaddw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
  342. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  343. 0.4,6.RS,11.IMM,16.RB,21.514:X:e500:evaddiw %RS,%RB,%IMM:Vector Add Immediate Word
  344. uint32_t w1, w2;
  345. w1 = *rBh + IMM;
  346. w2 = *rB + IMM;
  347. EV_SET_REG2(*rSh, *rS, w1, w2);
  348. //printf("evaddiw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
  349. PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
  350. 0.4,6.RS,11.RA,16.RB,21.516:X:e500:evsubfw %RS,%RA,%RB:Vector Subtract from Word
  351. uint32_t w1, w2;
  352. w1 = *rBh - *rAh;
  353. w2 = *rB - *rA;
  354. EV_SET_REG2(*rSh, *rS, w1, w2);
  355. //printf("evsubfw: *rSh = %08x; *rS = %08x; w1 = %08x w2 = %08x\n", *rSh, *rS, w1, w2);
  356. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  357. 0.4,6.RS,11.IMM,16.RB,21.518:X:e500:evsubifw %RS,%RB,%IMM:Vector Subtract Immediate from Word
  358. uint32_t w1, w2;
  359. w1 = *rBh - IMM;
  360. w2 = *rB - IMM;
  361. EV_SET_REG2(*rSh, *rS, w1, w2);
  362. //printf("evsubifw: *rSh = %08x; *rS = %08x; IMM = %d\n", *rSh, *rS, IMM);
  363. PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
  364. 0.4,6.RS,11.RA,16.0,21.520:X:e500:evabs %RS,%RA:Vector Absolute Value
  365. int32_t w1, w2;
  366. w1 = *rAh;
  367. if (w1 < 0 && w1 != 0x80000000)
  368. w1 = -w1;
  369. w2 = *rA;
  370. if (w2 < 0 && w2 != 0x80000000)
  371. w2 = -w2;
  372. EV_SET_REG2(*rSh, *rS, w1, w2);
  373. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  374. 0.4,6.RS,11.RA,16.0,21.521:X:e500:evneg %RS,%RA:Vector Negate
  375. int32_t w1, w2;
  376. w1 = *rAh;
  377. /* Negating the most negative number yields the most negative number. */
  378. if (w1 != 0x80000000)
  379. w1 = -w1;
  380. w2 = *rA;
  381. if (w2 != 0x80000000)
  382. w2 = -w2;
  383. EV_SET_REG2(*rSh, *rS, w1, w2);
  384. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  385. 0.4,6.RS,11.RA,16.0,21.522:X:e500:evextsb %RS,%RA:Vector Extend Signed Byte
  386. uint64_t w1, w2;
  387. w1 = *rAh & 0xff;
  388. if (w1 & 0x80)
  389. w1 |= 0xffffff00;
  390. w2 = *rA & 0xff;
  391. if (w2 & 0x80)
  392. w2 |= 0xffffff00;
  393. EV_SET_REG2(*rSh, *rS, w1, w2);
  394. PPC_INSN_INT(RS_BITMASK, RA_BITMASK , 0);
  395. 0.4,6.RS,11.RA,16.0,21.523:X:e500:evextsh %RS,%RA:Vector Extend Signed Half Word
  396. uint64_t w1, w2;
  397. w1 = *rAh & 0xffff;
  398. if (w1 & 0x8000)
  399. w1 |= 0xffff0000;
  400. w2 = *rA & 0xffff;
  401. if (w2 & 0x8000)
  402. w2 |= 0xffff0000;
  403. EV_SET_REG2(*rSh, *rS, w1, w2);
  404. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  405. 0.4,6.RS,11.RA,16.RB,21.529:X:e500:evand %RS,%RA,%RB:Vector AND
  406. uint32_t w1, w2;
  407. w1 = *rBh & *rAh;
  408. w2 = *rB & *rA;
  409. EV_SET_REG2(*rSh, *rS, w1, w2);
  410. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  411. 0.4,6.RS,11.RA,16.RB,21.535:X:e500:evor %RS,%RA,%RB:Vector OR
  412. uint32_t w1, w2;
  413. w1 = *rBh | *rAh;
  414. w2 = *rB | *rA;
  415. EV_SET_REG2(*rSh, *rS, w1, w2);
  416. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  417. 0.4,6.RS,11.RA,16.RB,21.534:X:e500:evxor %RS,%RA,%RB:Vector XOR
  418. uint32_t w1, w2;
  419. w1 = *rBh ^ *rAh;
  420. w2 = *rB ^ *rA;
  421. EV_SET_REG2(*rSh, *rS, w1, w2);
  422. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  423. 0.4,6.RS,11.RA,16.RB,21.542:X:e500:evnand %RS,%RA,%RB:Vector NAND
  424. uint32_t w1, w2;
  425. w1 = ~(*rBh & *rAh);
  426. w2 = ~(*rB & *rA);
  427. EV_SET_REG2(*rSh, *rS, w1, w2);
  428. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  429. 0.4,6.RS,11.RA,16.RB,21.536:X:e500:evnor %RS,%RA,%RB:Vector NOR
  430. uint32_t w1, w2;
  431. w1 = ~(*rBh | *rAh);
  432. w2 = ~(*rB | *rA);
  433. EV_SET_REG2(*rSh, *rS, w1, w2);
  434. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  435. 0.4,6.RS,11.RA,16.RB,21.537:X:e500:eveqv %RS,%RA,%RB:Vector Equivalent
  436. uint32_t w1, w2;
  437. w1 = (~*rBh) ^ *rAh;
  438. w2 = (~*rB) ^ *rA;
  439. EV_SET_REG2(*rSh, *rS, w1, w2);
  440. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  441. 0.4,6.RS,11.RA,16.RB,21.530:X:e500:evandc %RS,%RA,%RB:Vector AND with Complement
  442. uint32_t w1, w2;
  443. w1 = (~*rBh) & *rAh;
  444. w2 = (~*rB) & *rA;
  445. EV_SET_REG2(*rSh, *rS, w1, w2);
  446. //printf("evandc: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
  447. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  448. 0.4,6.RS,11.RA,16.RB,21.539:X:e500:evorc %RS,%RA,%RB:Vector OR with Complement
  449. uint32_t w1, w2;
  450. w1 = (~*rBh) | *rAh;
  451. w2 = (~*rB) | *rA;
  452. EV_SET_REG2(*rSh, *rS, w1, w2);
  453. //printf("evorc: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
  454. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  455. 0.4,6.RS,11.RA,16.RB,21.552:X:e500:evrlw %RS,%RA,%RB:Vector Rotate Left Word
  456. uint32_t nh, nl, w1, w2;
  457. nh = *rBh & 0x1f;
  458. nl = *rB & 0x1f;
  459. w1 = ((uint32_t)*rAh) << nh | ((uint32_t)*rAh) >> ((32 - nh) & 0x1f);
  460. w2 = ((uint32_t)*rA) << nl | ((uint32_t)*rA) >> ((32 - nl) & 0x1f);
  461. EV_SET_REG2(*rSh, *rS, w1, w2);
  462. //printf("evrlw: nh %d nl %d *rSh = %08x; *rS = %08x\n", nh, nl, *rSh, *rS);
  463. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  464. 0.4,6.RS,11.RA,16.UIMM,21.554:X:e500:evrlwi %RS,%RA,%UIMM:Vector Rotate Left Word Immediate
  465. uint32_t w1, w2, imm;
  466. imm = (uint32_t)UIMM;
  467. w1 = ((uint32_t)*rAh) << imm | ((uint32_t)*rAh) >> ((32 - imm) & 0x1f);
  468. w2 = ((uint32_t)*rA) << imm | ((uint32_t)*rA) >> ((32 - imm) & 0x1f);
  469. EV_SET_REG2(*rSh, *rS, w1, w2);
  470. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  471. 0.4,6.RS,11.RA,16.RB,21.548:X:e500:evslw %RS,%RA,%RB:Vector Shift Left Word
  472. uint32_t nh, nl, w1, w2;
  473. nh = *rBh & 0x1f;
  474. nl = *rB & 0x1f;
  475. w1 = ((uint32_t)*rAh) << nh;
  476. w2 = ((uint32_t)*rA) << nl;
  477. EV_SET_REG2(*rSh, *rS, w1, w2);
  478. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  479. 0.4,6.RS,11.RA,16.UIMM,21.550:X:e500:evslwi %RS,%RA,%UIMM:Vector Shift Left Word Immediate
  480. uint32_t w1, w2, imm = UIMM;
  481. w1 = ((uint32_t)*rAh) << imm;
  482. w2 = ((uint32_t)*rA) << imm;
  483. EV_SET_REG2(*rSh, *rS, w1, w2);
  484. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  485. 0.4,6.RS,11.RA,16.RB,21.545:X:e500:evsrws %RS,%RA,%RB:Vector Shift Right Word Signed
  486. int32_t w1, w2;
  487. uint32_t nh, nl;
  488. nh = *rBh & 0x1f;
  489. nl = *rB & 0x1f;
  490. w1 = ((int32_t)*rAh) >> nh;
  491. w2 = ((int32_t)*rA) >> nl;
  492. EV_SET_REG2(*rSh, *rS, w1, w2);
  493. //printf("evsrws: nh %d nl %d *rSh = %08x; *rS = %08x\n", nh, nl, *rSh, *rS);
  494. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  495. 0.4,6.RS,11.RA,16.RB,21.544:X:e500:evsrwu %RS,%RA,%RB:Vector Shift Right Word Unsigned
  496. uint32_t w1, w2, nh, nl;
  497. nh = *rBh & 0x1f;
  498. nl = *rB & 0x1f;
  499. w1 = ((uint32_t)*rAh) >> nh;
  500. w2 = ((uint32_t)*rA) >> nl;
  501. EV_SET_REG2(*rSh, *rS, w1, w2);
  502. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  503. 0.4,6.RS,11.RA,16.UIMM,21.547:X:e500:evsrwis %RS,%RA,%UIMM:Vector Shift Right Word Immediate Signed
  504. int32_t w1, w2;
  505. uint32_t imm = UIMM;
  506. w1 = ((int32_t)*rAh) >> imm;
  507. w2 = ((int32_t)*rA) >> imm;
  508. EV_SET_REG2(*rSh, *rS, w1, w2);
  509. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  510. 0.4,6.RS,11.RA,16.UIMM,21.546:X:e500:evsrwiu %RS,%RA,%UIMM:Vector Shift Right Word Immediate Unsigned
  511. uint32_t w1, w2, imm = UIMM;
  512. w1 = ((uint32_t)*rAh) >> imm;
  513. w2 = ((uint32_t)*rA) >> imm;
  514. EV_SET_REG2(*rSh, *rS, w1, w2);
  515. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  516. 0.4,6.RS,11.RA,16.0,21.525:X:e500:evcntlzw %RS,%RA:Vector Count Leading Zeros Word
  517. uint32_t w1, w2, mask, c1, c2;
  518. for (c1 = 0, mask = 0x80000000, w1 = *rAh;
  519. !(w1 & mask) && mask != 0; mask >>= 1)
  520. c1++;
  521. for (c2 = 0, mask = 0x80000000, w2 = *rA;
  522. !(w2 & mask) && mask != 0; mask >>= 1)
  523. c2++;
  524. EV_SET_REG2(*rSh, *rS, c1, c2);
  525. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  526. 0.4,6.RS,11.RA,16.0,21.526:X:e500:evcntlsw %RS,%RA:Vector Count Leading Sign Bits Word
  527. uint32_t w1, w2, mask, sign_bit, c1, c2;
  528. for (c1 = 0, mask = 0x80000000, w1 = *rAh, sign_bit = w1 & mask;
  529. ((w1 & mask) == sign_bit) && mask != 0;
  530. mask >>= 1, sign_bit >>= 1)
  531. c1++;
  532. for (c2 = 0, mask = 0x80000000, w2 = *rA, sign_bit = w2 & mask;
  533. ((w2 & mask) == sign_bit) && mask != 0;
  534. mask >>= 1, sign_bit >>= 1)
  535. c2++;
  536. EV_SET_REG2(*rSh, *rS, c1, c2);
  537. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  538. 0.4,6.RS,11.RA,16.0,21.524:X:e500:evrndw %RS,%RA:Vector Round Word
  539. uint32_t w1, w2;
  540. w1 = ((uint32_t)*rAh + 0x8000) & 0xffff0000;
  541. w2 = ((uint32_t)*rA + 0x8000) & 0xffff0000;
  542. EV_SET_REG2(*rSh, *rS, w1, w2);
  543. //printf("evrndw: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
  544. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  545. 0.4,6.RS,11.RA,16.RB,21.556:X:e500:evmergehi %RS,%RA,%RB:Vector Merge Hi
  546. uint32_t w1, w2;
  547. w1 = *rAh;
  548. w2 = *rBh;
  549. EV_SET_REG2(*rSh, *rS, w1, w2);
  550. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  551. 0.4,6.RS,11.RA,16.RB,21.557:X:e500:evmergelo %RS,%RA,%RB:Vector Merge Low
  552. uint32_t w1, w2;
  553. w1 = *rA;
  554. w2 = *rB;
  555. EV_SET_REG2(*rSh, *rS, w1, w2);
  556. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  557. 0.4,6.RS,11.RA,16.RB,21.559:X:e500:evmergelohi %RS,%RA,%RB:Vector Merge Low Hi
  558. uint32_t w1, w2;
  559. w1 = *rA;
  560. w2 = *rBh;
  561. EV_SET_REG2(*rSh, *rS, w1, w2);
  562. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  563. 0.4,6.RS,11.RA,16.RB,21.558:X:e500:evmergehilo %RS,%RA,%RB:Vector Merge Hi Low
  564. uint32_t w1, w2;
  565. w1 = *rAh;
  566. w2 = *rB;
  567. EV_SET_REG2(*rSh, *rS, w1, w2);
  568. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  569. 0.4,6.RS,11.SIMM,16.0,21.553:X:e500:evsplati %RS,%SIMM:Vector Splat Immediate
  570. uint32_t w;
  571. w = SIMM & 0x1f;
  572. if (w & 0x10)
  573. w |= 0xffffffe0;
  574. EV_SET_REG2(*rSh, *rS, w, w);
  575. PPC_INSN_INT(RS_BITMASK, 0, 0);
  576. 0.4,6.RS,11.SIMM,16.0,21.555:X:e500:evsplatfi %RS,%SIMM:Vector Splat Fractional Immediate
  577. uint32_t w;
  578. w = SIMM << 27;
  579. EV_SET_REG2(*rSh, *rS, w, w);
  580. PPC_INSN_INT(RS_BITMASK, 0, 0);
  581. 0.4,6.BF,9.0,11.RA,16.RB,21.561:X:e500:evcmpgts %BF,%RA,%RB:Vector Compare Greater Than Signed
  582. int32_t ah, al, bh, bl;
  583. int w, ch, cl;
  584. ah = *rAh;
  585. al = *rA;
  586. bh = *rBh;
  587. bl = *rB;
  588. if (ah > bh)
  589. ch = 1;
  590. else
  591. ch = 0;
  592. if (al > bl)
  593. cl = 1;
  594. else
  595. cl = 0;
  596. w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
  597. CR_SET(BF, w);
  598. PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
  599. 0.4,6.BF,9.0,11.RA,16.RB,21.560:X:e500:evcmpgtu %BF,%RA,%RB:Vector Compare Greater Than Unsigned
  600. uint32_t ah, al, bh, bl;
  601. int w, ch, cl;
  602. ah = *rAh;
  603. al = *rA;
  604. bh = *rBh;
  605. bl = *rB;
  606. if (ah > bh)
  607. ch = 1;
  608. else
  609. ch = 0;
  610. if (al > bl)
  611. cl = 1;
  612. else
  613. cl = 0;
  614. w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
  615. CR_SET(BF, w);
  616. PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
  617. 0.4,6.BF,9.0,11.RA,16.RB,21.563:X:e500:evcmplts %BF,%RA,%RB:Vector Compare Less Than Signed
  618. int32_t ah, al, bh, bl;
  619. int w, ch, cl;
  620. ah = *rAh;
  621. al = *rA;
  622. bh = *rBh;
  623. bl = *rB;
  624. if (ah < bh)
  625. ch = 1;
  626. else
  627. ch = 0;
  628. if (al < bl)
  629. cl = 1;
  630. else
  631. cl = 0;
  632. w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
  633. CR_SET(BF, w);
  634. PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
  635. 0.4,6.BF,9.0,11.RA,16.RB,21.562:X:e500:evcmpltu %BF,%RA,%RB:Vector Compare Less Than Unsigned
  636. uint32_t ah, al, bh, bl;
  637. int w, ch, cl;
  638. ah = *rAh;
  639. al = *rA;
  640. bh = *rBh;
  641. bl = *rB;
  642. if (ah < bh)
  643. ch = 1;
  644. else
  645. ch = 0;
  646. if (al < bl)
  647. cl = 1;
  648. else
  649. cl = 0;
  650. w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
  651. CR_SET(BF, w);
  652. PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
  653. 0.4,6.BF,9.0,11.RA,16.RB,21.564:X:e500:evcmpeq %BF,%RA,%RB:Vector Compare Equal
  654. uint32_t ah, al, bh, bl;
  655. int w, ch, cl;
  656. ah = *rAh;
  657. al = *rA;
  658. bh = *rBh;
  659. bl = *rB;
  660. if (ah == bh)
  661. ch = 1;
  662. else
  663. ch = 0;
  664. if (al == bl)
  665. cl = 1;
  666. else
  667. cl = 0;
  668. w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
  669. CR_SET(BF, w);
  670. //printf("evcmpeq: ch %d cl %d BF %d, CR is now %08x\n", ch, cl, BF, CR);
  671. PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
  672. 0.4,6.RS,11.RA,16.RB,21.79,29.CRFS:X:e500:evsel %RS,%RA,%RB,%CRFS:Vector Select
  673. uint32_t w1, w2;
  674. int cr;
  675. cr = CR_FIELD(CRFS);
  676. if (cr & 8)
  677. w1 = *rAh;
  678. else
  679. w1 = *rBh;
  680. if (cr & 4)
  681. w2 = *rA;
  682. else
  683. w2 = *rB;
  684. EV_SET_REG2(*rSh, *rS, w1, w2);
  685. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  686. 0.4,6.RS,11.RA,16.RB,21.527:X:e500:brinc %RS,%RA,%RB:Bit Reversed Increment
  687. uint32_t w1, w2, a, d, mask;
  688. mask = (*rB) & 0xffff;
  689. a = (*rA) & 0xffff;
  690. d = EV_BITREVERSE16(1 + EV_BITREVERSE16(a | ~mask));
  691. *rS = ((*rA) & 0xffff0000) | (d & 0xffff);
  692. //printf("brinc: *rS = %08x\n", *rS);
  693. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
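#
# Worked example of the bit-reversed increment (assuming EV_BITREVERSE16
# reverses the 16 low-order bits, as its name suggests): with the low half
# of RA holding index 4 (0b0100) and a mask of 0x000f selecting a 16-entry
# buffer, a | ~mask = 0xfff4, reversed = 0x2fff, plus 1 = 0x3000, reversed
# back = 0x000c: 0b0100 reversed within the 4 masked bits is 0b0010,
# incremented to 0b0011, and reversed back to 0b1100 (12).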
  694. #
  695. # A.2.8 Integer SPE Complex Instructions
  696. #
  697. 0.4,6.RS,11.RA,16.RB,21.1031:EVX:e500:evmhossf %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional
  698. int16_t al, ah, bl, bh;
  699. int32_t tl, th;
  700. int movl, movh;
  701. al = (int16_t) EV_LOHALF (*rA);
  702. ah = (int16_t) EV_LOHALF (*rAh);
  703. bl = (int16_t) EV_LOHALF (*rB);
  704. bh = (int16_t) EV_LOHALF (*rBh);
  705. tl = ev_multiply16_ssf (al, bl, &movl);
  706. th = ev_multiply16_ssf (ah, bh, &movh);
  707. EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
  708. EV_SATURATE (movl, 0x7fffffff, tl));
  709. EV_SET_SPEFSCR_OV (movl, movh);
  710. PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  711. 0.4,6.RS,11.RA,16.RB,21.1063:EVX:e500:evmhossfa %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional Accumulate
  712. int16_t al, ah, bl, bh;
  713. int32_t tl, th;
  714. int movl, movh;
  715. al = (int16_t) EV_LOHALF (*rA);
  716. ah = (int16_t) EV_LOHALF (*rAh);
  717. bl = (int16_t) EV_LOHALF (*rB);
  718. bh = (int16_t) EV_LOHALF (*rBh);
  719. tl = ev_multiply16_ssf (al, bl, &movl);
  720. th = ev_multiply16_ssf (ah, bh, &movh);
  721. EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
  722. EV_SATURATE (movl, 0x7fffffff, tl));
  723. EV_SET_SPEFSCR_OV (movl, movh);
  724. PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  725. 0.4,6.RS,11.RA,16.RB,21.1039:EVX:e500:evmhosmf %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional
  726. int16_t al, ah, bl, bh;
  727. int32_t tl, th;
  728. int dummy;
  729. al = (int16_t) EV_LOHALF (*rA);
  730. ah = (int16_t) EV_LOHALF (*rAh);
  731. bl = (int16_t) EV_LOHALF (*rB);
  732. bh = (int16_t) EV_LOHALF (*rBh);
  733. tl = ev_multiply16_smf (al, bl, & dummy);
  734. th = ev_multiply16_smf (ah, bh, & dummy);
  735. EV_SET_REG2 (*rSh, *rS, th, tl);
  736. PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  737. 0.4,6.RS,11.RA,16.RB,21.1071:EVX:e500:evmhosmfa %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional Accumulate
  738. int32_t al, ah, bl, bh;
  739. int32_t tl, th;
  740. int dummy;
  741. al = (int16_t) EV_LOHALF (*rA);
  742. ah = (int16_t) EV_LOHALF (*rAh);
  743. bl = (int16_t) EV_LOHALF (*rB);
  744. bh = (int16_t) EV_LOHALF (*rBh);
  745. tl = ev_multiply16_smf (al, bl, & dummy);
  746. th = ev_multiply16_smf (ah, bh, & dummy);
  747. EV_SET_REG2_ACC (*rSh, *rS, th, tl);
  748. PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  749. 0.4,6.RS,11.RA,16.RB,21.1037:EVX:e500:evmhosmi %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer
  750. int32_t al, ah, bl, bh, tl, th;
  751. al = (int32_t)(int16_t)EV_LOHALF(*rA);
  752. ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
  753. bl = (int32_t)(int16_t)EV_LOHALF(*rB);
  754. bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
  755. tl = al * bl;
  756. th = ah * bh;
  757. EV_SET_REG2(*rSh, *rS, th, tl);
  758. //printf("evmhosmi: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
  759. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  760. 0.4,6.RS,11.RA,16.RB,21.1069:EVX:e500:evmhosmia %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer Accumulate
  761. int32_t al, ah, bl, bh, tl, th;
  762. al = (int32_t)(int16_t)EV_LOHALF(*rA);
  763. ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
  764. bl = (int32_t)(int16_t)EV_LOHALF(*rB);
  765. bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
  766. tl = al * bl;
  767. th = ah * bh;
  768. EV_SET_REG2_ACC(*rSh, *rS, th, tl);
  769. //printf("evmhosmia: ACC = %08x; *rSh = %08x; *rS = %08x\n", ACC, *rSh, *rS);
  770. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  771. 0.4,6.RS,11.RA,16.RB,21.1036:EVX:e500:evmhoumi %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer
  772. uint32_t al, ah, bl, bh, tl, th;
  773. al = (uint32_t)(uint16_t)EV_LOHALF(*rA);
  774. ah = (uint32_t)(uint16_t)EV_LOHALF(*rAh);
  775. bl = (uint32_t)(uint16_t)EV_LOHALF(*rB);
  776. bh = (uint32_t)(uint16_t)EV_LOHALF(*rBh);
  777. tl = al * bl;
  778. th = ah * bh;
  779. EV_SET_REG2(*rSh, *rS, th, tl);
  780. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  781. 0.4,6.RS,11.RA,16.RB,21.1068:EVX:e500:evmhoumia %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer Accumulate
  782. uint32_t al, ah, bl, bh, tl, th;
  783. al = (uint32_t)(uint16_t)EV_LOHALF(*rA);
  784. ah = (uint32_t)(uint16_t)EV_LOHALF(*rAh);
  785. bl = (uint32_t)(uint16_t)EV_LOHALF(*rB);
  786. bh = (uint32_t)(uint16_t)EV_LOHALF(*rBh);
  787. tl = al * bl;
  788. th = ah * bh;
  789. EV_SET_REG2_ACC(*rSh, *rS, th, tl);
  790. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  791. 0.4,6.RS,11.RA,16.RB,21.1027:EVX:e500:evmhessf %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional
  792. int16_t al, ah, bl, bh;
  793. int32_t tl, th;
  794. int movl, movh;
  795. al = (int16_t) EV_HIHALF (*rA);
  796. ah = (int16_t) EV_HIHALF (*rAh);
  797. bl = (int16_t) EV_HIHALF (*rB);
  798. bh = (int16_t) EV_HIHALF (*rBh);
  799. tl = ev_multiply16_ssf (al, bl, &movl);
  800. th = ev_multiply16_ssf (ah, bh, &movh);
  801. EV_SET_REG2 (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
  802. EV_SATURATE (movl, 0x7fffffff, tl));
  803. EV_SET_SPEFSCR_OV (movl, movh);
  804. PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  805. 0.4,6.RS,11.RA,16.RB,21.1059:EVX:e500:evmhessfa %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional Accumulate
  806. int16_t al, ah, bl, bh;
  807. int32_t tl, th;
  808. int movl, movh;
  809. al = (int16_t) EV_HIHALF (*rA);
  810. ah = (int16_t) EV_HIHALF (*rAh);
  811. bl = (int16_t) EV_HIHALF (*rB);
  812. bh = (int16_t) EV_HIHALF (*rBh);
  813. tl = ev_multiply16_ssf (al, bl, &movl);
  814. th = ev_multiply16_ssf (ah, bh, &movh);
  815. EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE (movh, 0x7fffffff, th),
  816. EV_SATURATE (movl, 0x7fffffff, tl));
  817. EV_SET_SPEFSCR_OV (movl, movh);
  818. PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  819. 0.4,6.RS,11.RA,16.RB,21.1035:EVX:e500:evmhesmf %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional
  820. int16_t al, ah, bl, bh;
  821. int64_t tl, th;
  822. int movl, movh;
  823. al = (int16_t) EV_HIHALF (*rA);
  824. ah = (int16_t) EV_HIHALF (*rAh);
  825. bl = (int16_t) EV_HIHALF (*rB);
  826. bh = (int16_t) EV_HIHALF (*rBh);
  827. tl = ev_multiply16_smf (al, bl, &movl);
  828. th = ev_multiply16_smf (ah, bh, &movh);
  829. EV_SET_REG2 (*rSh, *rS, th, tl);
  830. EV_SET_SPEFSCR_OV (movl, movh);
  831. PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  832. 0.4,6.RS,11.RA,16.RB,21.1067:EVX:e500:evmhesmfa %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional Accumulate
  833. int16_t al, ah, bl, bh;
  834. int32_t tl, th;
  835. int dummy;
  836. al = (int16_t) EV_HIHALF (*rA);
  837. ah = (int16_t) EV_HIHALF (*rAh);
  838. bl = (int16_t) EV_HIHALF (*rB);
  839. bh = (int16_t) EV_HIHALF (*rBh);
  840. tl = ev_multiply16_smf (al, bl, & dummy);
  841. th = ev_multiply16_smf (ah, bh, & dummy);
  842. EV_SET_REG2_ACC (*rSh, *rS, th, tl);
  843. PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  844. 0.4,6.RS,11.RA,16.RB,21.1033:EVX:e500:evmhesmi %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer
  845. int16_t al, ah, bl, bh;
  846. int32_t tl, th;
  847. al = (int16_t) EV_HIHALF (*rA);
  848. ah = (int16_t) EV_HIHALF (*rAh);
  849. bl = (int16_t) EV_HIHALF (*rB);
  850. bh = (int16_t) EV_HIHALF (*rBh);
  851. tl = al * bl;
  852. th = ah * bh;
  853. EV_SET_REG2 (*rSh, *rS, th, tl);
  854. PPC_INSN_INT (RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  855. 0.4,6.RS,11.RA,16.RB,21.1065:EVX:e500:evmhesmia %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer Accumulate
  856. int32_t al, ah, bl, bh, tl, th;
  857. al = (int32_t)(int16_t)EV_HIHALF(*rA);
  858. ah = (int32_t)(int16_t)EV_HIHALF(*rAh);
  859. bl = (int32_t)(int16_t)EV_HIHALF(*rB);
  860. bh = (int32_t)(int16_t)EV_HIHALF(*rBh);
  861. tl = al * bl;
  862. th = ah * bh;
  863. EV_SET_REG2_ACC(*rSh, *rS, th, tl);
  864. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  865. 0.4,6.RS,11.RA,16.RB,21.1032:EVX:e500:evmheumi %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer
  866. uint32_t al, ah, bl, bh, tl, th;
  867. al = (uint32_t)(uint16_t)EV_HIHALF(*rA);
  868. ah = (uint32_t)(uint16_t)EV_HIHALF(*rAh);
  869. bl = (uint32_t)(uint16_t)EV_HIHALF(*rB);
  870. bh = (uint32_t)(uint16_t)EV_HIHALF(*rBh);
  871. tl = al * bl;
  872. th = ah * bh;
  873. EV_SET_REG2(*rSh, *rS, th, tl);
  874. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  875. 0.4,6.RS,11.RA,16.RB,21.1064:EVX:e500:evmheumia %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer Accumulate
  876. uint32_t al, ah, bl, bh, tl, th;
  877. al = (uint32_t)(uint16_t)EV_HIHALF(*rA);
  878. ah = (uint32_t)(uint16_t)EV_HIHALF(*rAh);
  879. bl = (uint32_t)(uint16_t)EV_HIHALF(*rB);
  880. bh = (uint32_t)(uint16_t)EV_HIHALF(*rBh);
  881. tl = al * bl;
  882. th = ah * bh;
  883. EV_SET_REG2_ACC(*rSh, *rS, th, tl);
  884. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  885. 0.4,6.RS,11.RA,16.RB,21.1287:EVX:e500:evmhossfaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional and Accumulate into Words
  886. int16_t al, ah, bl, bh;
  887. int32_t t1, t2;
  888. int64_t tl, th;
  889. int movl, movh, ovl, ovh;
  890. al = (int16_t) EV_LOHALF (*rA);
  891. ah = (int16_t) EV_LOHALF (*rAh);
  892. bl = (int16_t) EV_LOHALF (*rB);
  893. bh = (int16_t) EV_LOHALF (*rBh);
  894. t1 = ev_multiply16_ssf (ah, bh, &movh);
  895. t2 = ev_multiply16_ssf (al, bl, &movl);
  896. th = EV_ACCHIGH + EV_SATURATE (movh, 0x7fffffff, t1);
  897. tl = EV_ACCLOW + EV_SATURATE (movl, 0x7fffffff, t2);
  898. ovh = EV_SAT_P_S32 (th);
  899. ovl = EV_SAT_P_S32 (tl);
  900. EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
  901. EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
  902. EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
  903. PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  904. 0.4,6.RS,11.RA,16.RB,21.1285:EVX:e500:evmhossiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Integer and Accumulate into Words
  905. int32_t al, ah, bl, bh;
  906. int64_t t1, t2, tl, th;
  907. int ovl, ovh;
  908. al = (int32_t)(int16_t)EV_LOHALF(*rA);
  909. ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
  910. bl = (int32_t)(int16_t)EV_LOHALF(*rB);
  911. bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
  912. t1 = ah * bh;
  913. t2 = al * bl;
  914. th = EV_ACCHIGH + t1;
  915. tl = EV_ACCLOW + t2;
  916. ovh = EV_SAT_P_S32(th);
  917. ovl = EV_SAT_P_S32(tl);
  918. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
  919. EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
  920. //printf("evmhossiaaw: ovh %d ovl %d al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
  921. //printf("evmhossiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  922. EV_SET_SPEFSCR_OV(ovl, ovh);
  923. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  924. 0.4,6.RS,11.RA,16.RB,21.1295:EVX:e500:evmhosmfaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional and Accumulate into Words
  925. int32_t al, ah, bl, bh;
  926. int64_t t1, t2, tl, th;
  927. al = (int32_t)(int16_t)EV_LOHALF(*rA);
  928. ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
  929. bl = (int32_t)(int16_t)EV_LOHALF(*rB);
  930. bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
  931. t1 = ((int64_t)ah * bh) << 1;
  932. t2 = ((int64_t)al * bl) << 1;
  933. th = EV_ACCHIGH + (t1 & 0xffffffff);
  934. tl = EV_ACCLOW + (t2 & 0xffffffff);
  935. EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
  936. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  937. 0.4,6.RS,11.RA,16.RB,21.1293:EVX:e500:evmhosmiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer and Accumulate into Words
  938. int32_t al, ah, bl, bh;
  939. int64_t t1, t2, tl, th;
  940. al = (int32_t)(int16_t)EV_LOHALF(*rA);
  941. ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
  942. bl = (int32_t)(int16_t)EV_LOHALF(*rB);
  943. bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
  944. t1 = ah * bh;
  945. t2 = al * bl;
  946. th = EV_ACCHIGH + t1;
  947. tl = EV_ACCLOW + t2;
  948. EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
  949. //printf("evmhosmiaaw: al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", al, ah, bl, bh, t1, t2, tl, th);
  950. //printf("evmhosmiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  951. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  952. 0.4,6.RS,11.RA,16.RB,21.1284:EVX:e500:evmhousiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Saturate Integer and Accumulate into Words
  953. uint32_t al, ah, bl, bh;
  954. uint64_t t1, t2;
  955. int64_t tl, th;
  956. int ovl, ovh;
  957. al = (uint32_t)(uint16_t)EV_LOHALF(*rA);
  958. ah = (uint32_t)(uint16_t)EV_LOHALF(*rAh);
  959. bl = (uint32_t)(uint16_t)EV_LOHALF(*rB);
  960. bh = (uint32_t)(uint16_t)EV_LOHALF(*rBh);
  961. t1 = ah * bh;
  962. t2 = al * bl;
  963. th = (int64_t)EV_ACCHIGH + (int64_t)t1;
  964. tl = (int64_t)EV_ACCLOW + (int64_t)t2;
  965. ovh = EV_SAT_P_U32(th);
  966. ovl = EV_SAT_P_U32(tl);
  967. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
  968. EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
  969. //printf("evmhousiaaw: al %u ah %u bl %u bh %u t1 %qu t2 %qu tl %qu th %qu\n", al, ah, bl, bh, t1, t2, tl, th);
  970. //printf("evmhousiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  971. EV_SET_SPEFSCR_OV(ovl, ovh);
  972. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  973. 0.4,6.RS,11.RA,16.RB,21.1292:EVX:e500:evmhoumiaaw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer and Accumulate into Words
  974. uint32_t al, ah, bl, bh;
  975. uint32_t t1, t2;
  976. int64_t tl, th;
  977. al = (uint32_t)(uint16_t)EV_LOHALF(*rA);
  978. ah = (uint32_t)(uint16_t)EV_LOHALF(*rAh);
  979. bl = (uint32_t)(uint16_t)EV_LOHALF(*rB);
  980. bh = (uint32_t)(uint16_t)EV_LOHALF(*rBh);
  981. t1 = ah * bh;
  982. t2 = al * bl;
  983. th = EV_ACCHIGH + t1;
  984. tl = EV_ACCLOW + t2;
  985. EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
  986. //printf("evmhoumiaaw: al %u ah %u bl %u bh %u t1 %qu t2 %qu tl %qu th %qu\n", al, ah, bl, bh, t1, t2, tl, th);
  987. //printf("evmhoumiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  988. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  989. 0.4,6.RS,11.RA,16.RB,21.1283:EVX:e500:evmhessfaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional and Accumulate into Words
  990. int16_t al, ah, bl, bh;
  991. int32_t t1, t2;
  992. int64_t tl, th;
  993. int movl, movh, ovl, ovh;
  994. al = (int16_t) EV_HIHALF (*rA);
  995. ah = (int16_t) EV_HIHALF (*rAh);
  996. bl = (int16_t) EV_HIHALF (*rB);
  997. bh = (int16_t) EV_HIHALF (*rBh);
  998. t1 = ev_multiply16_ssf (ah, bh, &movh);
  999. t2 = ev_multiply16_ssf (al, bl, &movl);
  1000. th = EV_ACCHIGH + EV_SATURATE (movh, 0x7fffffff, t1);
  1001. tl = EV_ACCLOW + EV_SATURATE (movl, 0x7fffffff, t2);
  1002. ovh = EV_SAT_P_S32 (th);
  1003. ovl = EV_SAT_P_S32 (tl);
  1004. EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
  1005. EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
  1006. EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
  1007. PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1008. 0.4,6.RS,11.RA,16.RB,21.1281:EVX:e500:evmhessiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Integer and Accumulate into Words
  1009. int32_t al, ah, bl, bh;
  1010. int64_t t1, t2, tl, th;
  1011. int ovl, ovh;
  1012. al = (int32_t)(int16_t)EV_HIHALF(*rA);
  1013. ah = (int32_t)(int16_t)EV_HIHALF(*rAh);
  1014. bl = (int32_t)(int16_t)EV_HIHALF(*rB);
  1015. bh = (int32_t)(int16_t)EV_HIHALF(*rBh);
  1016. t1 = ah * bh;
  1017. t2 = al * bl;
  1018. th = EV_ACCHIGH + t1;
  1019. tl = EV_ACCLOW + t2;
  1020. ovh = EV_SAT_P_S32(th);
  1021. ovl = EV_SAT_P_S32(tl);
  1022. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
  1023. EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
  1024. //printf("evmhessiaaw: ovh %d ovl %d al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
  1025. //printf("evmhessiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  1026. EV_SET_SPEFSCR_OV(ovl, ovh);
  1027. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1028. 0.4,6.RS,11.RA,16.RB,21.1291:EVX:e500:evmhesmfaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional and Accumulate into Words
  1029. int16_t al, ah, bl, bh;
  1030. int32_t t1, t2, th, tl;
  1031. int dummy;
  1032. al = (int16_t)EV_HIHALF(*rA);
  1033. ah = (int16_t)EV_HIHALF(*rAh);
  1034. bl = (int16_t)EV_HIHALF(*rB);
  1035. bh = (int16_t)EV_HIHALF(*rBh);
  1036. t1 = ev_multiply16_smf (ah, bh, &dummy);
  1037. t2 = ev_multiply16_smf (al, bl, &dummy);
  1038. th = EV_ACCHIGH + t1;
  1039. tl = EV_ACCLOW + t2;
  1040. EV_SET_REG2_ACC(*rSh, *rS, th, tl);
  1041. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1042. 0.4,6.RS,11.RA,16.RB,21.1289:EVX:e500:evmhesmiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer and Accumulate into Words
  1043. int32_t al, ah, bl, bh;
  1044. int64_t t1, t2, tl, th;
  1045. al = (int32_t)(int16_t)EV_HIHALF(*rA);
  1046. ah = (int32_t)(int16_t)EV_HIHALF(*rAh);
  1047. bl = (int32_t)(int16_t)EV_HIHALF(*rB);
  1048. bh = (int32_t)(int16_t)EV_HIHALF(*rBh);
  1049. t1 = ah * bh;
  1050. t2 = al * bl;
  1051. th = EV_ACCHIGH + t1;
  1052. tl = EV_ACCLOW + t2;
  1053. EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
  1054. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1055. 0.4,6.RS,11.RA,16.RB,21.1280:EVX:e500:evmheusiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Saturate Integer and Accumulate into Words
  1056. uint32_t al, ah, bl, bh;
  1057. uint64_t t1, t2;
  1058. int64_t tl, th;
  1059. int ovl, ovh;
  1060. al = (uint32_t)(uint16_t)EV_HIHALF(*rA);
  1061. ah = (uint32_t)(uint16_t)EV_HIHALF(*rAh);
  1062. bl = (uint32_t)(uint16_t)EV_HIHALF(*rB);
  1063. bh = (uint32_t)(uint16_t)EV_HIHALF(*rBh);
  1064. t1 = ah * bh;
  1065. t2 = al * bl;
  1066. th = (int64_t)EV_ACCHIGH + (int64_t)t1;
  1067. tl = (int64_t)EV_ACCLOW + (int64_t)t2;
  1068. ovh = EV_SAT_P_U32(th);
  1069. ovl = EV_SAT_P_U32(tl);
  1070. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
  1071. EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
  1072. EV_SET_SPEFSCR_OV(ovl, ovh);
  1073. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1074. 0.4,6.RS,11.RA,16.RB,21.1288:EVX:e500:evmheumiaaw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer and Accumulate into Words
  1075. uint32_t al, ah, bl, bh;
  1076. uint32_t t1, t2;
  1077. uint64_t tl, th;
  1078. al = (uint32_t)(uint16_t)EV_HIHALF(*rA);
  1079. ah = (uint32_t)(uint16_t)EV_HIHALF(*rAh);
  1080. bl = (uint32_t)(uint16_t)EV_HIHALF(*rB);
  1081. bh = (uint32_t)(uint16_t)EV_HIHALF(*rBh);
  1082. t1 = ah * bh;
  1083. t2 = al * bl;
  1084. th = EV_ACCHIGH + t1;
  1085. tl = EV_ACCLOW + t2;
  1086. EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
  1087. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1088. 0.4,6.RS,11.RA,16.RB,21.1415:EVX:e500:evmhossfanw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Fractional and Accumulate Negative into Words
  1089. int16_t al, ah, bl, bh;
  1090. int32_t t1, t2;
  1091. int64_t tl, th;
  1092. int movl, movh, ovl, ovh;
  1093. al = (int16_t) EV_LOHALF (*rA);
  1094. ah = (int16_t) EV_LOHALF (*rAh);
  1095. bl = (int16_t) EV_LOHALF (*rB);
  1096. bh = (int16_t) EV_LOHALF (*rBh);
  1097. t1 = ev_multiply16_ssf (ah, bh, &movh);
  1098. t2 = ev_multiply16_ssf (al, bl, &movl);
  1099. th = EV_ACCHIGH - EV_SATURATE (movh, 0x7fffffff, t1);
  1100. tl = EV_ACCLOW - EV_SATURATE (movl, 0x7fffffff, t2);
  1101. ovh = EV_SAT_P_S32 (th);
  1102. ovl = EV_SAT_P_S32 (tl);
  1103. EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
  1104. EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
  1105. EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
  1106. PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1107. 0.4,6.RS,11.RA,16.RB,21.1413:EVX:e500:evmhossianw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Saturate Integer and Accumulate Negative into Words
  1108. int32_t al, ah, bl, bh;
  1109. int64_t t1, t2, tl, th;
  1110. int ovl, ovh;
  1111. al = (int32_t)(int16_t)EV_LOHALF(*rA);
  1112. ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
  1113. bl = (int32_t)(int16_t)EV_LOHALF(*rB);
  1114. bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
  1115. t1 = ah * bh;
  1116. t2 = al * bl;
  1117. th = EV_ACCHIGH - t1;
  1118. tl = EV_ACCLOW - t2;
  1119. ovh = EV_SAT_P_S32(th);
  1120. ovl = EV_SAT_P_S32(tl);
  1121. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
  1122. EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
  1123. EV_SET_SPEFSCR_OV(ovl, ovh);
  1124. //printf("evmhossianw: ACC = %08x; *rSh = %08x; *rS = %08x\n", ACC, *rSh, *rS);
  1125. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1126. 0.4,6.RS,11.RA,16.RB,21.1423:EVX:e500:evmhosmfanw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Fractional and Accumulate Negative into Words
  1127. int32_t al, ah, bl, bh;
  1128. int64_t t1, t2, tl, th;
  1129. al = (int32_t)(int16_t)EV_LOHALF(*rA);
  1130. ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
  1131. bl = (int32_t)(int16_t)EV_LOHALF(*rB);
  1132. bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
  1133. t1 = ((int64_t)ah * bh) << 1;
  1134. t2 = ((int64_t)al * bl) << 1;
  1135. th = EV_ACCHIGH - (t1 & 0xffffffff);
  1136. tl = EV_ACCLOW - (t2 & 0xffffffff);
  1137. EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
  1138. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1139. 0.4,6.RS,11.RA,16.RB,21.1421:EVX:e500:evmhosmianw %RS,%RA,%RB:Vector Multiply Half Words Odd Signed Modulo Integer and Accumulate Negative into Words
  1140. int32_t al, ah, bl, bh;
  1141. int64_t t1, t2, tl, th;
  1142. al = (int32_t)(int16_t)EV_LOHALF(*rA);
  1143. ah = (int32_t)(int16_t)EV_LOHALF(*rAh);
  1144. bl = (int32_t)(int16_t)EV_LOHALF(*rB);
  1145. bh = (int32_t)(int16_t)EV_LOHALF(*rBh);
  1146. t1 = ah * bh;
  1147. t2 = al * bl;
  1148. th = EV_ACCHIGH - t1;
  1149. tl = EV_ACCLOW - t2;
  1150. EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
  1151. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1152. 0.4,6.RS,11.RA,16.RB,21.1412:EVX:e500:evmhousianw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Saturate Integer and Accumulate Negative into Words
  1153. uint32_t al, ah, bl, bh;
  1154. uint64_t t1, t2;
  1155. int64_t tl, th;
  1156. int ovl, ovh;
  1157. al = (uint32_t)(uint16_t)EV_LOHALF(*rA);
  1158. ah = (uint32_t)(uint16_t)EV_LOHALF(*rAh);
  1159. bl = (uint32_t)(uint16_t)EV_LOHALF(*rB);
  1160. bh = (uint32_t)(uint16_t)EV_LOHALF(*rBh);
  1161. t1 = ah * bh;
  1162. t2 = al * bl;
  1163. th = (int64_t)EV_ACCHIGH - (int64_t)t1;
  1164. tl = (int64_t)EV_ACCLOW - (int64_t)t2;
  1165. ovl = EV_SAT_P_U32(tl);
  1166. ovh = EV_SAT_P_U32(th);
  1167. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
  1168. EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
  1169. //printf("evmhousianw: ovh %d ovl %d al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
  1170. //printf("evmoussianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  1171. EV_SET_SPEFSCR_OV(ovl, ovh);
  1172. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1173. 0.4,6.RS,11.RA,16.RB,21.1420:EVX:e500:evmhoumianw %RS,%RA,%RB:Vector Multiply Half Words Odd Unsigned Modulo Integer and Accumulate Negative into Words
  1174. uint32_t al, ah, bl, bh;
  1175. uint32_t t1, t2;
  1176. uint64_t tl, th;
  1177. al = (uint32_t)(uint16_t)EV_LOHALF(*rA);
  1178. ah = (uint32_t)(uint16_t)EV_LOHALF(*rAh);
  1179. bl = (uint32_t)(uint16_t)EV_LOHALF(*rB);
  1180. bh = (uint32_t)(uint16_t)EV_LOHALF(*rBh);
  1181. t1 = ah * bh;
  1182. t2 = al * bl;
  1183. th = EV_ACCHIGH - t1;
  1184. tl = EV_ACCLOW - t2;
  1185. EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
  1186. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1187. 0.4,6.RS,11.RA,16.RB,21.1411:EVX:e500:evmhessfanw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Fractional and Accumulate Negative into Words
  1188. int16_t al, ah, bl, bh;
  1189. int32_t t1, t2;
  1190. int64_t tl, th;
  1191. int movl, movh, ovl, ovh;
  1192. al = (int16_t) EV_HIHALF (*rA);
  1193. ah = (int16_t) EV_HIHALF (*rAh);
  1194. bl = (int16_t) EV_HIHALF (*rB);
  1195. bh = (int16_t) EV_HIHALF (*rBh);
  1196. t1 = ev_multiply16_ssf (ah, bh, &movh);
  1197. t2 = ev_multiply16_ssf (al, bl, &movl);
  1198. th = EV_ACCHIGH - EV_SATURATE (movh, 0x7fffffff, t1);
  1199. tl = EV_ACCLOW - EV_SATURATE (movl, 0x7fffffff, t2);
  1200. ovh = EV_SAT_P_S32 (th);
  1201. ovl = EV_SAT_P_S32 (tl);
  1202. EV_SET_REG2_ACC (*rSh, *rS, EV_SATURATE_ACC (ovh, th, 0x80000000, 0x7fffffff, th),
  1203. EV_SATURATE_ACC (ovl, tl, 0x80000000, 0x7fffffff, tl));
  1204. EV_SET_SPEFSCR_OV (movl | ovl, movh | ovh);
  1205. PPC_INSN_INT_SPR (RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1206. 0.4,6.RS,11.RA,16.RB,21.1409:EVX:e500:evmhessianw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Saturate Integer and Accumulate Negative into Words
  1207. int32_t al, ah, bl, bh;
  1208. int64_t t1, t2, tl, th;
  1209. int ovl, ovh;
  1210. al = (int32_t)(int16_t)EV_HIHALF(*rA);
  1211. ah = (int32_t)(int16_t)EV_HIHALF(*rAh);
  1212. bl = (int32_t)(int16_t)EV_HIHALF(*rB);
  1213. bh = (int32_t)(int16_t)EV_HIHALF(*rBh);
  1214. t1 = ah * bh;
  1215. t2 = al * bl;
  1216. th = EV_ACCHIGH - t1;
  1217. tl = EV_ACCLOW - t2;
  1218. ovh = EV_SAT_P_S32(th);
  1219. ovl = EV_SAT_P_S32(tl);
  1220. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
  1221. EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
  1222. EV_SET_SPEFSCR_OV(ovl, ovh);
  1223. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1224. 0.4,6.RS,11.RA,16.RB,21.1419:EVX:e500:evmhesmfanw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Fractional and Accumulate Negative into Words
  1225. int32_t al, ah, bl, bh;
  1226. int64_t t1, t2, tl, th;
1227. al = (int32_t)(int16_t)EV_HIHALF(*rA);
1228. ah = (int32_t)(int16_t)EV_HIHALF(*rAh);
1229. bl = (int32_t)(int16_t)EV_HIHALF(*rB);
1230. bh = (int32_t)(int16_t)EV_HIHALF(*rBh);
  1231. t1 = ((int64_t)ah * bh) << 1;
  1232. t2 = ((int64_t)al * bl) << 1;
  1233. th = EV_ACCHIGH - (t1 & 0xffffffff);
  1234. tl = EV_ACCLOW - (t2 & 0xffffffff);
  1235. EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
  1236. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1237. 0.4,6.RS,11.RA,16.RB,21.1417:EVX:e500:evmhesmianw %RS,%RA,%RB:Vector Multiply Half Words Even Signed Modulo Integer and Accumulate Negative into Words
  1238. int32_t al, ah, bl, bh;
  1239. int64_t t1, t2, tl, th;
  1240. al = (int32_t)(int16_t)EV_HIHALF(*rA);
  1241. ah = (int32_t)(int16_t)EV_HIHALF(*rAh);
  1242. bl = (int32_t)(int16_t)EV_HIHALF(*rB);
  1243. bh = (int32_t)(int16_t)EV_HIHALF(*rBh);
  1244. t1 = ah * bh;
  1245. t2 = al * bl;
  1246. th = EV_ACCHIGH - t1;
  1247. tl = EV_ACCLOW - t2;
  1248. EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
  1249. //printf("evmhesmianw: al %d ah %d bl %d bh %d t1 %qd t2 %qd tl %qd th %qd\n", al, ah, bl, bh, t1, t2, tl, th);
  1250. //printf("evmhesmianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  1251. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1252. 0.4,6.RS,11.RA,16.RB,21.1408:EVX:e500:evmheusianw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Saturate Integer and Accumulate Negative into Words
  1253. uint32_t al, ah, bl, bh;
  1254. uint64_t t1, t2;
  1255. int64_t tl, th;
  1256. int ovl, ovh;
  1257. al = (uint32_t)(uint16_t)EV_HIHALF(*rA);
  1258. ah = (uint32_t)(uint16_t)EV_HIHALF(*rAh);
  1259. bl = (uint32_t)(uint16_t)EV_HIHALF(*rB);
  1260. bh = (uint32_t)(uint16_t)EV_HIHALF(*rBh);
  1261. t1 = ah * bh;
  1262. t2 = al * bl;
  1263. th = (int64_t)EV_ACCHIGH - (int64_t)t1;
  1264. tl = (int64_t)EV_ACCLOW - (int64_t)t2;
  1265. ovl = EV_SAT_P_U32(tl);
  1266. ovh = EV_SAT_P_U32(th);
  1267. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0, 0xffffffff, th),
  1268. EV_SATURATE_ACC(ovl, tl, 0, 0xffffffff, tl));
  1269. //printf("evmheusianw: ovh %d ovl %d al %u ah %u bl %u bh %u t1 %qu t2 %qu tl %qd th %qd\n", ovh, ovl, al, ah, bl, bh, t1, t2, tl, th);
  1270. //printf("evmheusianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  1271. EV_SET_SPEFSCR_OV(ovl, ovh);
  1272. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1273. 0.4,6.RS,11.RA,16.RB,21.1416:EVX:e500:evmheumianw %RS,%RA,%RB:Vector Multiply Half Words Even Unsigned Modulo Integer and Accumulate Negative into Words
  1274. uint32_t al, ah, bl, bh;
  1275. uint32_t t1, t2;
  1276. uint64_t tl, th;
  1277. al = (uint32_t)(uint16_t)EV_HIHALF(*rA);
  1278. ah = (uint32_t)(uint16_t)EV_HIHALF(*rAh);
  1279. bl = (uint32_t)(uint16_t)EV_HIHALF(*rB);
  1280. bh = (uint32_t)(uint16_t)EV_HIHALF(*rBh);
  1281. t1 = ah * bh;
  1282. t2 = al * bl;
  1283. th = EV_ACCHIGH - t1;
  1284. tl = EV_ACCLOW - t2;
  1285. EV_SET_REG2_ACC(*rSh, *rS, th & 0xffffffff, tl & 0xffffffff);
  1286. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
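#
# The guarded forms (evmheg*, evmhog*) below use a single half-word lane and
# accumulate into the full 64-bit ACC rather than into per-word halves.  In
# the fractional variants, the conditional OR with 0xfffffffe00000000 appears
# to sign-extend a 33-bit fractional product (bit 32 acting as the sign bit)
# to 64 bits before it is added to or subtracted from ACC.
#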
  1287. 0.4,6.RS,11.RA,16.RB,21.1327:EVX:e500:evmhogsmfaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Fractional and Accumulate
  1288. int32_t a, b;
  1289. int64_t t1, t2;
  1290. a = (int32_t)(int16_t)EV_LOHALF(*rA);
  1291. b = (int32_t)(int16_t)EV_LOHALF(*rB);
  1292. t1 = EV_MUL16_SSF(a, b);
  1293. if (t1 & ((uint64_t)1 << 32))
  1294. t1 |= 0xfffffffe00000000;
  1295. t2 = ACC + t1;
  1296. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1297. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1298. 0.4,6.RS,11.RA,16.RB,21.1325:EVX:e500:evmhogsmiaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Integer and Accumulate
  1299. int32_t a, b;
  1300. int64_t t1, t2;
  1301. a = (int32_t)(int16_t)EV_LOHALF(*rA);
  1302. b = (int32_t)(int16_t)EV_LOHALF(*rB);
  1303. t1 = (int64_t)a * (int64_t)b;
  1304. t2 = (int64_t)ACC + t1;
  1305. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1306. //printf("evmhogsmiaa: a %d b %d t1 %qd t2 %qd\n", a, b, t1, t2);
  1307. //printf("evmhogsmiaa: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  1308. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1309. 0.4,6.RS,11.RA,16.RB,21.1324:EVX:e500:evmhogumiaa %RS,%RA,%RB:Multiply Half Words Odd Guarded Unsigned Modulo Integer and Accumulate
  1310. uint32_t a, b;
  1311. uint64_t t1, t2;
  1312. a = (uint32_t)(uint16_t)EV_LOHALF(*rA);
  1313. b = (uint32_t)(uint16_t)EV_LOHALF(*rB);
  1314. t1 = a * b;
  1315. t2 = ACC + t1;
  1316. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1317. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1318. 0.4,6.RS,11.RA,16.RB,21.1323:EVX:e500:evmhegsmfaa %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Fractional and Accumulate
  1319. int32_t a, b;
  1320. int64_t t1, t2;
  1321. a = (int32_t)(int16_t)EV_HIHALF(*rA);
  1322. b = (int32_t)(int16_t)EV_HIHALF(*rB);
  1323. t1 = EV_MUL16_SSF(a, b);
  1324. if (t1 & ((uint64_t)1 << 32))
  1325. t1 |= 0xfffffffe00000000;
  1326. t2 = ACC + t1;
  1327. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1328. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1329. 0.4,6.RS,11.RA,16.RB,21.1321:EVX:e500:evmhegsmiaa %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Integer and Accumulate
  1330. int32_t a, b;
  1331. int64_t t1, t2;
  1332. a = (int32_t)(int16_t)EV_HIHALF(*rA);
  1333. b = (int32_t)(int16_t)EV_HIHALF(*rB);
  1334. t1 = (int64_t)(a * b);
  1335. t2 = ACC + t1;
  1336. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1337. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1338. 0.4,6.RS,11.RA,16.RB,21.1320:EVX:e500:evmhegumiaa %RS,%RA,%RB:Multiply Half Words Even Guarded Unsigned Modulo Integer and Accumulate
  1339. uint32_t a, b;
  1340. uint64_t t1, t2;
  1341. a = (uint32_t)(uint16_t)EV_HIHALF(*rA);
  1342. b = (uint32_t)(uint16_t)EV_HIHALF(*rB);
  1343. t1 = a * b;
  1344. t2 = ACC + t1;
  1345. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1346. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1347. 0.4,6.RS,11.RA,16.RB,21.1455:EVX:e500:evmhogsmfan %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Fractional and Accumulate Negative
  1348. int32_t a, b;
  1349. int64_t t1, t2;
  1350. a = (int32_t)(int16_t)EV_LOHALF(*rA);
  1351. b = (int32_t)(int16_t)EV_LOHALF(*rB);
  1352. t1 = EV_MUL16_SSF(a, b);
  1353. if (t1 & ((uint64_t)1 << 32))
  1354. t1 |= 0xfffffffe00000000;
  1355. t2 = ACC - t1;
  1356. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1357. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1358. 0.4,6.RS,11.RA,16.RB,21.1453:EVX:e500:evmhogsmian %RS,%RA,%RB:Multiply Half Words Odd Guarded Signed Modulo Integer and Accumulate Negative
  1359. int32_t a, b;
  1360. int64_t t1, t2;
  1361. a = (int32_t)(int16_t)EV_LOHALF(*rA);
  1362. b = (int32_t)(int16_t)EV_LOHALF(*rB);
  1363. t1 = (int64_t)a * (int64_t)b;
  1364. t2 = ACC - t1;
  1365. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1366. //printf("evmhogsmian: a %d b %d t1 %qd t2 %qd\n", a, b, t1, t2);
  1367. //printf("evmhogsmian: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  1368. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1369. 0.4,6.RS,11.RA,16.RB,21.1452:EVX:e500:evmhogumian %RS,%RA,%RB:Multiply Half Words Odd Guarded Unsigned Modulo Integer and Accumulate Negative
  1370. uint32_t a, b;
  1371. uint64_t t1, t2;
  1372. a = (uint32_t)(uint16_t)EV_LOHALF(*rA);
  1373. b = (uint32_t)(uint16_t)EV_LOHALF(*rB);
  1374. t1 = (uint64_t)a * (uint64_t)b;
  1375. t2 = ACC - t1;
  1376. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1377. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1378. 0.4,6.RS,11.RA,16.RB,21.1451:EVX:e500:evmhegsmfan %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Fractional and Accumulate Negative
  1379. int32_t a, b;
  1380. int64_t t1, t2;
  1381. a = (int32_t)(int16_t)EV_HIHALF(*rA);
  1382. b = (int32_t)(int16_t)EV_HIHALF(*rB);
  1383. t1 = EV_MUL16_SSF(a, b);
  1384. if (t1 & ((uint64_t)1 << 32))
  1385. t1 |= 0xfffffffe00000000;
  1386. t2 = ACC - t1;
  1387. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1388. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1389. 0.4,6.RS,11.RA,16.RB,21.1449:EVX:e500:evmhegsmian %RS,%RA,%RB:Multiply Half Words Even Guarded Signed Modulo Integer and Accumulate Negative
  1390. int32_t a, b;
  1391. int64_t t1, t2;
  1392. a = (int32_t)(int16_t)EV_HIHALF(*rA);
  1393. b = (int32_t)(int16_t)EV_HIHALF(*rB);
  1394. t1 = (int64_t)a * (int64_t)b;
  1395. t2 = ACC - t1;
  1396. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1397. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1398. 0.4,6.RS,11.RA,16.RB,21.1448:EVX:e500:evmhegumian %RS,%RA,%RB:Multiply Half Words Even Guarded Unsigned Modulo Integer and Accumulate Negative
  1399. uint32_t a, b;
  1400. uint64_t t1, t2;
  1401. a = (uint32_t)(uint16_t)EV_HIHALF(*rA);
  1402. b = (uint32_t)(uint16_t)EV_HIHALF(*rB);
  1403. t1 = (uint64_t)a * (uint64_t)b;
  1404. t2 = ACC - t1;
  1405. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1406. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
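#
# The evmwh* (word high) forms compute a 64-bit product of the corresponding
# 32-bit words and keep only the high 32 bits (t >> 32).  The saturate
# fractional variants rely on ev_multiply32_ssf, defined elsewhere, to flag
# the overflowing -1.0 * -1.0 case, which is then clamped to 0x7fffffff.
#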
  1407. 0.4,6.RS,11.RA,16.RB,21.1095:EVX:e500:evmwhssf %RS,%RA,%RB:Vector Multiply Word High Signed Saturate Fractional
  1408. int32_t al, ah, bl, bh;
  1409. int64_t t1, t2;
  1410. int movl, movh;
  1411. al = *rA;
  1412. ah = *rAh;
  1413. bl = *rB;
  1414. bh = *rBh;
  1415. t1 = ev_multiply32_ssf(al, bl, &movl);
  1416. t2 = ev_multiply32_ssf(ah, bh, &movh);
  1417. EV_SET_REG2(*rSh, *rS, EV_SATURATE(movh, 0x7fffffff, t2 >> 32),
  1418. EV_SATURATE(movl, 0x7fffffff, t1 >> 32));
  1419. EV_SET_SPEFSCR_OV(movl, movh);
  1420. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1421. 0.4,6.RS,11.RA,16.RB,21.1127:EVX:e500:evmwhssfa %RS,%RA,%RB:Vector Multiply Word High Signed Saturate Fractional and Accumulate
  1422. int32_t al, ah, bl, bh;
  1423. int64_t t1, t2;
  1424. int movl, movh;
  1425. al = *rA;
  1426. ah = *rAh;
  1427. bl = *rB;
  1428. bh = *rBh;
  1429. t1 = ev_multiply32_ssf(al, bl, &movl);
  1430. t2 = ev_multiply32_ssf(ah, bh, &movh);
  1431. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(movh, 0x7fffffff, t2 >> 32),
  1432. EV_SATURATE(movl, 0x7fffffff, t1 >> 32));
  1433. EV_SET_SPEFSCR_OV(movl, movh);
  1434. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1435. 0.4,6.RS,11.RA,16.RB,21.1103:EVX:e500:evmwhsmf %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Fractional
  1436. int32_t al, ah, bl, bh;
  1437. int64_t t1, t2;
  1438. al = *rA;
  1439. ah = *rAh;
  1440. bl = *rB;
  1441. bh = *rBh;
  1442. t1 = EV_MUL32_SSF(al, bl);
  1443. t2 = EV_MUL32_SSF(ah, bh);
  1444. EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
  1445. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1446. 0.4,6.RS,11.RA,16.RB,21.1135:EVX:e500:evmwhsmfa %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Fractional and Accumulate
  1447. int32_t al, ah, bl, bh;
  1448. int64_t t1, t2;
  1449. al = *rA;
  1450. ah = *rAh;
  1451. bl = *rB;
  1452. bh = *rBh;
  1453. t1 = EV_MUL32_SSF(al, bl);
  1454. t2 = EV_MUL32_SSF(ah, bh);
  1455. EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
  1456. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1457. 0.4,6.RS,11.RA,16.RB,21.1101:EVX:e500:evmwhsmi %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Integer
  1458. int32_t al, ah, bl, bh;
  1459. int64_t t1, t2;
  1460. al = *rA;
  1461. ah = *rAh;
  1462. bl = *rB;
  1463. bh = *rBh;
  1464. t1 = (int64_t)al * (int64_t)bl;
  1465. t2 = (int64_t)ah * (int64_t)bh;
  1466. EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
  1467. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1468. 0.4,6.RS,11.RA,16.RB,21.1133:EVX:e500:evmwhsmia %RS,%RA,%RB:Vector Multiply Word High Signed Modulo Integer and Accumulate
  1469. int32_t al, ah, bl, bh;
  1470. int64_t t1, t2;
  1471. al = *rA;
  1472. ah = *rAh;
  1473. bl = *rB;
  1474. bh = *rBh;
  1475. t1 = (int64_t)al * (int64_t)bl;
  1476. t2 = (int64_t)ah * (int64_t)bh;
  1477. EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
  1478. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1479. 0.4,6.RS,11.RA,16.RB,21.1100:EVX:e500:evmwhumi %RS,%RA,%RB:Vector Multiply Word High Unsigned Modulo Integer
  1480. uint32_t al, ah, bl, bh;
  1481. uint64_t t1, t2;
  1482. al = *rA;
  1483. ah = *rAh;
  1484. bl = *rB;
  1485. bh = *rBh;
  1486. t1 = (uint64_t)al * (uint64_t)bl;
  1487. t2 = (uint64_t)ah * (uint64_t)bh;
  1488. EV_SET_REG2(*rSh, *rS, t2 >> 32, t1 >> 32);
  1489. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1490. 0.4,6.RS,11.RA,16.RB,21.1132:EVX:e500:evmwhumia %RS,%RA,%RB:Vector Multiply Word High Unsigned Modulo Integer and Accumulate
  1491. uint32_t al, ah, bl, bh;
  1492. uint64_t t1, t2;
  1493. al = *rA;
  1494. ah = *rAh;
  1495. bl = *rB;
  1496. bh = *rBh;
  1497. t1 = (uint64_t)al * (uint64_t)bl;
  1498. t2 = (uint64_t)ah * (uint64_t)bh;
  1499. EV_SET_REG2_ACC(*rSh, *rS, t2 >> 32, t1 >> 32);
  1500. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
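#
# The evmwl* (word low) forms mirror the evmwh* forms above, except that only
# the low 32 bits of each 64-bit product are kept.
#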
  1501. 0.4,6.RS,11.RA,16.RB,21.1091:EVX:e500:evmwlssf %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional
  1502. int32_t al, ah, bl, bh;
  1503. int64_t t1, t2;
  1504. int movl, movh;
  1505. al = *rA;
  1506. ah = *rAh;
  1507. bl = *rB;
  1508. bh = *rBh;
  1509. t1 = ev_multiply32_ssf(al, bl, &movl);
  1510. t2 = ev_multiply32_ssf(ah, bh, &movh);
  1511. EV_SET_REG2(*rSh, *rS, EV_SATURATE(movh, 0xffffffff, t2),
  1512. EV_SATURATE(movl, 0xffffffff, t1));
  1513. EV_SET_SPEFSCR_OV(movl, movh);
  1514. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1515. 0.4,6.RS,11.RA,16.RB,21.1123:EVX:e500:evmwlssfa %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate
  1516. int32_t al, ah, bl, bh;
  1517. int64_t t1, t2;
  1518. int movl, movh;
  1519. al = *rA;
  1520. ah = *rAh;
  1521. bl = *rB;
  1522. bh = *rBh;
  1523. t1 = ev_multiply32_ssf(al, bl, &movl);
  1524. t2 = ev_multiply32_ssf(ah, bh, &movh);
  1525. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(movh, 0xffffffff, t2),
  1526. EV_SATURATE(movl, 0xffffffff, t1));
  1527. EV_SET_SPEFSCR_OV(movl, movh);
  1528. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1529. 0.4,6.RS,11.RA,16.RB,21.1099:EVX:e500:evmwlsmf %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional
  1530. int32_t al, ah, bl, bh;
  1531. int64_t t1, t2;
  1532. al = *rA;
  1533. ah = *rAh;
  1534. bl = *rB;
  1535. bh = *rBh;
  1536. t1 = EV_MUL32_SSF(al, bl);
  1537. t2 = EV_MUL32_SSF(ah, bh);
  1538. EV_SET_REG2(*rSh, *rS, t2, t1);
  1539. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1540. 0.4,6.RS,11.RA,16.RB,21.1131:EVX:e500:evmwlsmfa %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate
  1541. int32_t al, ah, bl, bh;
  1542. int64_t t1, t2;
  1543. al = *rA;
  1544. ah = *rAh;
  1545. bl = *rB;
  1546. bh = *rBh;
  1547. t1 = EV_MUL32_SSF(al, bl);
  1548. t2 = EV_MUL32_SSF(ah, bh);
  1549. EV_SET_REG2_ACC(*rSh, *rS, t2, t1);
  1550. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1551. 0.4,6.RS,11.RA,16.RB,21.1096:EVX:e500:evmwlumi %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer
  1552. uint32_t al, ah, bl, bh;
  1553. uint64_t t1, t2;
  1554. al = *rA;
  1555. ah = *rAh;
  1556. bl = *rB;
  1557. bh = *rBh;
  1558. t1 = (uint64_t)al * (uint64_t)bl;
  1559. t2 = (uint64_t)ah * (uint64_t)bh;
  1560. EV_SET_REG2(*rSh, *rS, t2, t1);
  1561. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1562. 0.4,6.RS,11.RA,16.RB,21.1128:EVX:e500:evmwlumia %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate
  1563. uint32_t al, ah, bl, bh;
  1564. uint64_t t1, t2;
  1565. al = *rA;
  1566. ah = *rAh;
  1567. bl = *rB;
  1568. bh = *rBh;
  1569. t1 = (uint64_t)al * (uint64_t)bl;
  1570. t2 = (uint64_t)ah * (uint64_t)bh;
  1571. EV_SET_REG2_ACC(*rSh, *rS, t2, t1);
  1572. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
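#
# The evmwl*aaw / evmwl*anw forms accumulate the low product words into the
# per-word accumulator halves (add for "aaw", subtract for "anw").  The
# unsigned saturate variants detect carry or borrow by testing the bits above
# bit 31 of the 64-bit temporary (ovh = th >> 32, ovl = tl >> 32).
#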
  1573. 0.4,6.RS,11.RA,16.RB,21.1347:EVX:e500:evmwlssfaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate in Words
  1574. int32_t al, ah, bl, bh;
  1575. int64_t t1, t2, tl, th;
  1576. int movl, movh, ovl, ovh;
  1577. al = *rA;
  1578. ah = *rAh;
  1579. bl = *rB;
  1580. bh = *rBh;
  1581. t1 = ev_multiply32_ssf(ah, bh, &movh);
  1582. t2 = ev_multiply32_ssf(al, bl, &movl);
  1583. th = EV_ACCHIGH + EV_SATURATE(movh, 0xffffffff, t1);
  1584. tl = EV_ACCLOW + EV_SATURATE(movl, 0xffffffff, t2);
  1585. ovh = EV_SAT_P_S32(th);
  1586. ovl = EV_SAT_P_S32(tl);
  1587. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
  1588. EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
  1589. EV_SET_SPEFSCR_OV(movl | ovl, movh | ovh);
  1590. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1591. 0.4,6.RS,11.RA,16.RB,21.1345:EVX:e500:evmwlssiaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Integer and Accumulate in Words
  1592. int32_t al, ah, bl, bh;
  1593. int64_t t1, t2, tl, th;
  1594. int ovl, ovh;
  1595. al = *rA;
  1596. ah = *rAh;
  1597. bl = *rB;
  1598. bh = *rBh;
  1599. t1 = (int64_t)ah * (int64_t)bh;
  1600. t2 = (int64_t)al * (int64_t)bl;
  1601. th = EV_ACCHIGH + (t1 & 0xffffffff);
  1602. tl = EV_ACCLOW + (t2 & 0xffffffff);
  1603. ovh = EV_SAT_P_S32(th);
  1604. ovl = EV_SAT_P_S32(tl);
  1605. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
  1606. EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
  1607. EV_SET_SPEFSCR_OV(ovl, ovh);
  1608. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1609. 0.4,6.RS,11.RA,16.RB,21.1355:EVX:e500:evmwlsmfaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate in Words
  1610. int32_t al, ah, bl, bh;
  1611. int64_t t1, t2;
  1612. int mov;
  1613. al = *rA;
  1614. ah = *rAh;
  1615. bl = *rB;
  1616. bh = *rBh;
  1617. t1 = ev_multiply32_smf(ah, bh, &mov);
  1618. t2 = ev_multiply32_smf(al, bl, &mov);
  1619. EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
  1620. EV_ACCLOW + (t2 & 0xffffffff));
  1621. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1622. 0.4,6.RS,11.RA,16.RB,21.1353:EVX:e500:evmwlsmiaaw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Integer and Accumulate in Words
  1623. int32_t al, ah, bl, bh;
  1624. int64_t t1, t2;
  1625. al = *rA;
  1626. ah = *rAh;
  1627. bl = *rB;
  1628. bh = *rBh;
  1629. t1 = (int64_t)ah * (int64_t)bh;
  1630. t2 = (int64_t)al * (int64_t)bl;
  1631. EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
  1632. EV_ACCLOW + (t2 & 0xffffffff));
  1633. //printf("evmwlsmiaaw: al %d ah %d bl %d bh %d t1 %qd t2 %qd\n", al, ah, bl, bh, t1, t2);
  1634. //printf("evmwlsmiaaw: *rSh = %08x; *rS = %08x\n", *rSh, *rS);
  1635. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1636. 0.4,6.RS,11.RA,16.RB,21.1344:EVX:e500:evmwlusiaaw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Saturate Integer and Accumulate in Words
  1637. uint32_t al, ah, bl, bh;
  1638. uint64_t t1, t2, tl, th;
  1639. int ovl, ovh;
  1640. al = *rA;
  1641. ah = *rAh;
  1642. bl = *rB;
  1643. bh = *rBh;
  1644. t1 = (uint64_t)ah * (uint64_t)bh;
  1645. t2 = (uint64_t)al * (uint64_t)bl;
  1646. th = EV_ACCHIGH + (t1 & 0xffffffff);
  1647. tl = EV_ACCLOW + (t2 & 0xffffffff);
  1648. ovh = (th >> 32);
  1649. ovl = (tl >> 32);
  1650. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, th),
  1651. EV_SATURATE(ovl, 0xffffffff, tl));
  1652. EV_SET_SPEFSCR_OV(ovl, ovh);
  1653. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1654. 0.4,6.RS,11.RA,16.RB,21.1352:EVX:e500:evmwlumiaaw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate in Words
  1655. uint32_t al, ah, bl, bh;
  1656. uint64_t t1, t2;
  1657. al = *rA;
  1658. ah = *rAh;
  1659. bl = *rB;
  1660. bh = *rBh;
  1661. t1 = (uint64_t)ah * (uint64_t)bh;
  1662. t2 = (uint64_t)al * (uint64_t)bl;
  1663. EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH + (t1 & 0xffffffff),
  1664. EV_ACCLOW + (t2 & 0xffffffff));
  1665. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1666. 0.4,6.RS,11.RA,16.RB,21.1475:EVX:e500:evmwlssfanw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Fractional and Accumulate Negative in Words
  1667. int32_t al, ah, bl, bh;
  1668. int64_t t1, t2, tl, th;
  1669. int movl, movh, ovl, ovh;
  1670. al = *rA;
  1671. ah = *rAh;
  1672. bl = *rB;
  1673. bh = *rBh;
  1674. t1 = ev_multiply32_ssf(ah, bh, &movh);
  1675. t2 = ev_multiply32_ssf(al, bl, &movl);
  1676. th = EV_ACCHIGH - EV_SATURATE(movh, 0xffffffff, t1);
  1677. tl = EV_ACCLOW - EV_SATURATE(movl, 0xffffffff, t2);
  1678. ovh = EV_SAT_P_S32(th);
  1679. ovl = EV_SAT_P_S32(tl);
  1680. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
  1681. EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
  1682. EV_SET_SPEFSCR_OV(movl | ovl, movh | ovh);
  1683. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1684. 0.4,6.RS,11.RA,16.RB,21.1473:EVX:e500:evmwlssianw %RS,%RA,%RB:Vector Multiply Word Low Signed Saturate Integer and Accumulate Negative in Words
  1685. int32_t al, ah, bl, bh;
  1686. int64_t t1, t2, tl, th;
  1687. int ovl, ovh;
  1688. al = *rA;
  1689. ah = *rAh;
  1690. bl = *rB;
  1691. bh = *rBh;
  1692. t1 = (int64_t)ah * (int64_t)bh;
  1693. t2 = (int64_t)al * (int64_t)bl;
  1694. th = EV_ACCHIGH - (t1 & 0xffffffff);
  1695. tl = EV_ACCLOW - (t2 & 0xffffffff);
  1696. ovh = EV_SAT_P_S32(th);
  1697. ovl = EV_SAT_P_S32(tl);
  1698. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, th, 0x80000000, 0x7fffffff, th),
  1699. EV_SATURATE_ACC(ovl, tl, 0x80000000, 0x7fffffff, tl));
  1700. EV_SET_SPEFSCR_OV(ovl, ovh);
  1701. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1702. 0.4,6.RS,11.RA,16.RB,21.1483:EVX:e500:evmwlsmfanw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Fractional and Accumulate Negative in Words
  1703. int32_t al, ah, bl, bh;
  1704. int64_t t1, t2;
  1705. int mov;
  1706. al = *rA;
  1707. ah = *rAh;
  1708. bl = *rB;
  1709. bh = *rBh;
  1710. t1 = ev_multiply32_smf(ah, bh, &mov);
  1711. t2 = ev_multiply32_smf(al, bl, &mov);
  1712. EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
  1713. EV_ACCLOW - (t2 & 0xffffffff));
  1714. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1715. 0.4,6.RS,11.RA,16.RB,21.1481:EVX:e500:evmwlsmianw %RS,%RA,%RB:Vector Multiply Word Low Signed Modulo Integer and Accumulate Negative in Words
  1716. int32_t al, ah, bl, bh;
  1717. int64_t t1, t2;
  1718. al = *rA;
  1719. ah = *rAh;
  1720. bl = *rB;
  1721. bh = *rBh;
  1722. t1 = (int64_t)ah * (int64_t)bh;
  1723. t2 = (int64_t)al * (int64_t)bl;
  1724. EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
  1725. EV_ACCLOW - (t2 & 0xffffffff));
  1726. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1727. 0.4,6.RS,11.RA,16.RB,21.1472:EVX:e500:evmwlusianw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Saturate Integer and Accumulate Negative in Words
  1728. uint32_t al, ah, bl, bh;
  1729. uint64_t t1, t2, tl, th;
  1730. int ovl, ovh;
  1731. al = *rA;
  1732. ah = *rAh;
  1733. bl = *rB;
  1734. bh = *rBh;
  1735. t1 = (uint64_t)ah * (uint64_t)bh;
  1736. t2 = (uint64_t)al * (uint64_t)bl;
  1737. th = EV_ACCHIGH - (t1 & 0xffffffff);
  1738. tl = EV_ACCLOW - (t2 & 0xffffffff);
  1739. ovh = (th >> 32);
  1740. ovl = (tl >> 32);
  1741. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, th),
  1742. EV_SATURATE(ovl, 0xffffffff, tl));
  1743. //printf("evmwlusianw: ovl %d ovh %d al %d ah %d bl %d bh %d t1 %qd t2 %qd th %qd tl %qd\n", ovl, ovh, al, ah, al, bh, t1, t2, th, tl);
  1744. //printf("evmwlusianw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  1745. EV_SET_SPEFSCR_OV(ovl, ovh);
  1746. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1747. 0.4,6.RS,11.RA,16.RB,21.1480:EVX:e500:evmwlumianw %RS,%RA,%RB:Vector Multiply Word Low Unsigned Modulo Integer and Accumulate Negative in Words
  1748. uint32_t al, ah, bl, bh;
  1749. uint64_t t1, t2;
  1750. al = *rA;
  1751. ah = *rAh;
  1752. bl = *rB;
  1753. bh = *rBh;
  1754. t1 = (uint64_t)ah * (uint64_t)bh;
  1755. t2 = (uint64_t)al * (uint64_t)bl;
  1756. EV_SET_REG2_ACC(*rSh, *rS, EV_ACCHIGH - (t1 & 0xffffffff),
  1757. EV_ACCLOW - (t2 & 0xffffffff));
  1758. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
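#
# The evmw* forms below produce a single 64-bit result from the low words of
# rA and rB.  EV_SET_REG1 writes it to rS; the "a" variants also copy it to
# ACC, and the "aa" / "an" variants first add it to or subtract it from ACC.
#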
  1759. 0.4,6.RS,11.RA,16.RB,21.1107:EVX:e500:evmwssf %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional
  1760. int32_t a, b;
  1761. int64_t t;
  1762. int movl;
  1763. a = *rA;
  1764. b = *rB;
  1765. t = ev_multiply32_ssf(a, b, &movl);
  1766. EV_SET_REG1(*rSh, *rS, EV_SATURATE(movl, 0x7fffffffffffffff, t));
  1767. EV_SET_SPEFSCR_OV(movl, 0);
  1768. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1769. 0.4,6.RS,11.RA,16.RB,21.1139:EVX:e500:evmwssfa %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional and Accumulate
  1770. int32_t a, b;
  1771. int64_t t;
  1772. int movl;
  1773. a = *rA;
  1774. b = *rB;
  1775. t = ev_multiply32_ssf(a, b, &movl);
  1776. EV_SET_REG1_ACC(*rSh, *rS, EV_SATURATE(movl, 0x7fffffffffffffff, t));
  1777. EV_SET_SPEFSCR_OV(movl, 0);
  1778. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1779. 0.4,6.RS,11.RA,16.RB,21.1115:EVX:e500:evmwsmf %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional
  1780. int32_t a, b;
  1781. int64_t t;
  1782. int movl;
  1783. a = *rA;
  1784. b = *rB;
  1785. t = ev_multiply32_smf(a, b, &movl);
  1786. EV_SET_REG1(*rSh, *rS, t);
  1787. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1788. 0.4,6.RS,11.RA,16.RB,21.1147:EVX:e500:evmwsmfa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional and Accumulate
  1789. int32_t a, b;
  1790. int64_t t;
  1791. int movl;
  1792. a = *rA;
  1793. b = *rB;
  1794. t = ev_multiply32_smf(a, b, &movl);
  1795. EV_SET_REG1_ACC(*rSh, *rS, t);
  1796. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1797. 0.4,6.RS,11.RA,16.RB,21.1113:EVX:e500:evmwsmi %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer
  1798. int32_t a, b;
  1799. int64_t t;
  1800. int movl;
  1801. a = *rA;
  1802. b = *rB;
  1803. t = (int64_t)a * (int64_t)b;
  1804. EV_SET_REG1(*rSh, *rS, t);
  1805. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1806. 0.4,6.RS,11.RA,16.RB,21.1145:EVX:e500:evmwsmia %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer and Accumulate
  1807. int32_t a, b;
  1808. int64_t t;
  1809. int movl;
  1810. a = *rA;
  1811. b = *rB;
  1812. t = (int64_t)a * (int64_t)b;
  1813. EV_SET_REG1_ACC(*rSh, *rS, t);
  1814. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1815. 0.4,6.RS,11.RA,16.RB,21.1112:EVX:e500:evmwumi %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer
  1816. uint32_t a, b;
  1817. uint64_t t;
  1818. int movl;
  1819. a = *rA;
  1820. b = *rB;
1821. t = (uint64_t)a * (uint64_t)b;
  1822. EV_SET_REG1(*rSh, *rS, t);
  1823. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1824. 0.4,6.RS,11.RA,16.RB,21.1144:EVX:e500:evmwumia %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer and Accumulate
  1825. uint32_t a, b;
  1826. uint64_t t;
  1827. int movl;
  1828. a = *rA;
  1829. b = *rB;
1830. t = (uint64_t)a * (uint64_t)b;
  1831. EV_SET_REG1_ACC(*rSh, *rS, t);
  1832. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1833. 0.4,6.RS,11.RA,16.RB,21.1363:EVX:e500:evmwssfaa %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional Add and Accumulate
  1834. int64_t t1, t2;
  1835. int32_t a, b;
  1836. int movl;
  1837. a = *rA;
  1838. b = *rB;
  1839. t1 = ev_multiply32_ssf(a, b, &movl);
  1840. t2 = ACC + EV_SATURATE(movl, 0x7fffffffffffffff, t1);
  1841. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1842. EV_SET_SPEFSCR_OV(movl, 0);
  1843. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1844. 0.4,6.RS,11.RA,16.RB,21.1371:EVX:e500:evmwsmfaa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional Add and Accumulate
  1845. int64_t t1, t2;
  1846. int32_t a, b;
  1847. int movl;
  1848. a = *rA;
  1849. b = *rB;
  1850. t1 = ev_multiply32_smf(a, b, &movl);
  1851. t2 = ACC + t1;
  1852. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1853. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
1854. 0.4,6.RS,11.RA,16.RB,21.1369:EVX:e500:evmwsmiaa %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer Add and Accumulate
  1855. int64_t t1, t2;
  1856. int32_t a, b;
  1857. a = *rA;
  1858. b = *rB;
  1859. t1 = (int64_t)a * (int64_t)b;
  1860. t2 = ACC + t1;
  1861. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1862. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1863. 0.4,6.RS,11.RA,16.RB,21.1368:EVX:e500:evmwumiaa %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer Add and Accumulate
  1864. uint64_t t1, t2;
  1865. uint32_t a, b;
  1866. a = *rA;
  1867. b = *rB;
  1868. t1 = (uint64_t)a * (uint64_t)b;
  1869. t2 = ACC + t1;
  1870. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1871. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1872. 0.4,6.RS,11.RA,16.RB,21.1491:EVX:e500:evmwssfan %RS,%RA,%RB:Vector Multiply Word Signed Saturate Fractional and Accumulate Negative
  1873. int64_t t1, t2;
  1874. int32_t a, b;
  1875. int movl;
  1876. a = *rA;
  1877. b = *rB;
  1878. t1 = ev_multiply32_ssf(a, b, &movl);
  1879. t2 = ACC - EV_SATURATE(movl, 0x7fffffffffffffff, t1);
  1880. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1881. EV_SET_SPEFSCR_OV(movl, 0);
  1882. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  1883. 0.4,6.RS,11.RA,16.RB,21.1499:EVX:e500:evmwsmfan %RS,%RA,%RB:Vector Multiply Word Signed Modulo Fractional and Accumulate Negative
  1884. int64_t t1, t2;
  1885. int32_t a, b;
  1886. int movl;
  1887. a = *rA;
  1888. b = *rB;
  1889. t1 = ev_multiply32_smf(a, b, &movl);
  1890. t2 = ACC - t1;
  1891. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1892. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1893. 0.4,6.RS,11.RA,16.RB,21.1497:EVX:e500:evmwsmian %RS,%RA,%RB:Vector Multiply Word Signed Modulo Integer and Accumulate Negative
  1894. int64_t t1, t2;
  1895. int32_t a, b;
  1896. a = *rA;
  1897. b = *rB;
  1898. t1 = (int64_t)a * (int64_t)b;
  1899. t2 = ACC - t1;
  1900. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1901. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
  1902. 0.4,6.RS,11.RA,16.RB,21.1496:EVX:e500:evmwumian %RS,%RA,%RB:Vector Multiply Word Unsigned Modulo Integer and Accumulate Negative
  1903. uint64_t t1, t2;
  1904. uint32_t a, b;
  1905. a = *rA;
  1906. b = *rB;
  1907. t1 = (uint64_t)a * (uint64_t)b;
  1908. t2 = ACC - t1;
  1909. EV_SET_REG1_ACC(*rSh, *rS, t2);
  1910. PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);
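#
# The evadd*aaw / evsubf*aaw forms combine each 32-bit word of rA with the
# corresponding accumulator word in 64-bit arithmetic; the evsubf* forms
# subtract the rA word from the accumulator word.  The "s" forms saturate the
# per-word result, the "m" forms simply wrap modulo 2^32.
#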
  1911. 0.4,6.RS,11.RA,16.0,21.1217:EVX:e500:evaddssiaaw %RS,%RA:Vector Add Signed Saturate Integer to Accumulator Word
  1912. int64_t t1, t2;
  1913. int32_t al, ah;
  1914. int ovl, ovh;
  1915. al = *rA;
  1916. ah = *rAh;
  1917. t1 = (int64_t)EV_ACCHIGH + (int64_t)ah;
  1918. t2 = (int64_t)EV_ACCLOW + (int64_t)al;
  1919. ovh = EV_SAT_P_S32(t1);
  1920. ovl = EV_SAT_P_S32(t2);
  1921. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1 & ((uint64_t)1 << 32), 0x80000000, 0x7fffffff, t1),
  1922. EV_SATURATE_ACC(ovl, t2 & ((uint64_t)1 << 32), 0x80000000, 0x7fffffff, t2));
  1923. EV_SET_SPEFSCR_OV(ovl, ovh);
  1924. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
  1925. 0.4,6.RS,11.RA,16.0,21.1225:EVX:e500:evaddsmiaaw %RS,%RA:Vector Add Signed Modulo Integer to Accumulator Word
  1926. int64_t t1, t2;
  1927. int32_t al, ah;
  1928. al = *rA;
  1929. ah = *rAh;
  1930. t1 = (int64_t)EV_ACCHIGH + (int64_t)ah;
  1931. t2 = (int64_t)EV_ACCLOW + (int64_t)al;
  1932. EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
  1933. //printf("evaddsmiaaw: al %d ah %d t1 %qd t2 %qd\n", al, ah, t1, t2);
  1934. //printf("evaddsmiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  1935. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  1936. 0.4,6.RS,11.RA,16.0,21.1216:EVX:e500:evaddusiaaw %RS,%RA:Vector Add Unsigned Saturate Integer to Accumulator Word
  1937. int64_t t1, t2;
  1938. uint32_t al, ah;
  1939. int ovl, ovh;
  1940. al = *rA;
  1941. ah = *rAh;
  1942. t1 = (int64_t)EV_ACCHIGH + (int64_t)ah;
  1943. t2 = (int64_t)EV_ACCLOW + (int64_t)al;
  1944. ovh = EV_SAT_P_U32(t1);
  1945. ovl = EV_SAT_P_U32(t2);
  1946. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0xffffffff, t1),
  1947. EV_SATURATE(ovl, 0xffffffff, t2));
  1948. //printf("evaddusiaaw: ovl %d ovh %d al %d ah %d t1 %qd t2 %qd\n", ovl, ovh, al, ah, t1, t2);
  1949. //printf("evaddusiaaw: ACC = %08x.%08x; *rSh = %08x; *rS = %08x\n", (int)(ACC >> 32), (int)ACC, *rSh, *rS);
  1950. EV_SET_SPEFSCR_OV(ovl, ovh);
  1951. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
  1952. 0.4,6.RS,11.RA,16.0,21.1224:EVX:e500:evaddumiaaw %RS,%RA:Vector Add Unsigned Modulo Integer to Accumulator Word
  1953. uint64_t t1, t2;
  1954. uint32_t al, ah;
  1955. al = *rA;
  1956. ah = *rAh;
  1957. t1 = (uint64_t)EV_ACCHIGH + (uint64_t)ah;
1958. t2 = (uint64_t)EV_ACCLOW + (uint64_t)al;
  1959. EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
  1960. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  1961. 0.4,6.RS,11.RA,16.0,21.1219:EVX:e500:evsubfssiaaw %RS,%RA:Vector Subtract Signed Saturate Integer to Accumulator Word
  1962. int64_t t1, t2;
  1963. int32_t al, ah;
  1964. int ovl, ovh;
  1965. al = *rA;
  1966. ah = *rAh;
  1967. t1 = (int64_t)EV_ACCHIGH - (int64_t)ah;
  1968. t2 = (int64_t)EV_ACCLOW - (int64_t)al;
  1969. ovh = EV_SAT_P_S32(t1);
  1970. ovl = EV_SAT_P_S32(t2);
  1971. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE_ACC(ovh, t1, 0x80000000, 0x7fffffff, t1),
  1972. EV_SATURATE_ACC(ovl, t2, 0x80000000, 0x7fffffff, t2));
  1973. EV_SET_SPEFSCR_OV(ovl, ovh);
  1974. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
  1975. 0.4,6.RS,11.RA,16.0,21.1227:EVX:e500:evsubfsmiaaw %RS,%RA:Vector Subtract Signed Modulo Integer to Accumulator Word
  1976. int64_t t1, t2;
  1977. int32_t al, ah;
  1978. al = *rA;
  1979. ah = *rAh;
  1980. t1 = (int64_t)EV_ACCHIGH - (int64_t)ah;
  1981. t2 = (int64_t)EV_ACCLOW - (int64_t)al;
  1982. EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
  1983. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  1984. 0.4,6.RS,11.RA,16.0,21.1218:EVX:e500:evsubfusiaaw %RS,%RA:Vector Subtract Unsigned Saturate Integer to Accumulator Word
  1985. int64_t t1, t2;
  1986. uint32_t al, ah;
  1987. int ovl, ovh;
  1988. al = *rA;
  1989. ah = *rAh;
  1990. t1 = (int64_t)EV_ACCHIGH - (int64_t)ah;
  1991. t2 = (int64_t)EV_ACCLOW - (int64_t)al;
  1992. ovh = EV_SAT_P_U32(t1);
  1993. ovl = EV_SAT_P_U32(t2);
  1994. EV_SET_REG2_ACC(*rSh, *rS, EV_SATURATE(ovh, 0, t1),
  1995. EV_SATURATE(ovl, 0, t2));
  1996. EV_SET_SPEFSCR_OV(ovl, ovh);
  1997. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
  1998. 0.4,6.RS,11.RA,16.0,21.1226:EVX:e500:evsubfumiaaw %RS,%RA:Vector Subtract Unsigned Modulo Integer to Accumulator Word
  1999. uint64_t t1, t2;
  2000. uint32_t al, ah;
  2001. al = *rA;
  2002. ah = *rAh;
  2003. t1 = (uint64_t)EV_ACCHIGH - (uint64_t)ah;
  2004. t2 = (uint64_t)EV_ACCLOW - (uint64_t)al;
  2005. EV_SET_REG2_ACC(*rSh, *rS, t1, t2);
  2006. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  2007. 0.4,6.RS,11.RA,16.0,21.1220:EVX:e500:evmra %RS,%RA:Initialize Accumulator
  2008. EV_SET_REG2_ACC(*rSh, *rS, *rAh, *rA);
  2009. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
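#
# evdivws special-cases division by zero (the result saturates to 0x7fffffff
# for a non-negative dividend and to 0x80000000 for a negative one) and the
# 0x80000000 / -1 overflow case; evdivwu only needs the divide-by-zero check.
# Both report per-word overflow through EV_SET_SPEFSCR_OV.
#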
  2010. 0.4,6.RS,11.RA,16.RB,21.1222:EVX:e500:evdivws %RS,%RA,%RB:Vector Divide Word Signed
  2011. int32_t dividendh, dividendl, divisorh, divisorl;
  2012. int32_t w1, w2;
  2013. int ovh, ovl;
  2014. dividendh = *rAh;
  2015. dividendl = *rA;
  2016. divisorh = *rBh;
  2017. divisorl = *rB;
  2018. if (dividendh < 0 && divisorh == 0) {
  2019. w1 = 0x80000000;
  2020. ovh = 1;
2021. } else if (dividendh >= 0 && divisorh == 0) { /* >= also covers 0 / 0 */
  2022. w1 = 0x7fffffff;
  2023. ovh = 1;
  2024. } else if (dividendh == 0x80000000 && divisorh == -1) {
  2025. w1 = 0x7fffffff;
  2026. ovh = 1;
  2027. } else {
  2028. w1 = dividendh / divisorh;
  2029. ovh = 0;
  2030. }
  2031. if (dividendl < 0 && divisorl == 0) {
  2032. w2 = 0x80000000;
  2033. ovl = 1;
2034. } else if (dividendl >= 0 && divisorl == 0) { /* >= also covers 0 / 0 */
  2035. w2 = 0x7fffffff;
  2036. ovl = 1;
  2037. } else if (dividendl == 0x80000000 && divisorl == -1) {
  2038. w2 = 0x7fffffff;
  2039. ovl = 1;
  2040. } else {
  2041. w2 = dividendl / divisorl;
  2042. ovl = 0;
  2043. }
  2044. EV_SET_REG2(*rSh, *rS, w1, w2);
  2045. EV_SET_SPEFSCR_OV(ovl, ovh);
  2046. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
  2047. 0.4,6.RS,11.RA,16.RB,21.1223:EVX:e500:evdivwu %RS,%RA,%RB:Vector Divide Word Unsigned
  2048. uint32_t dividendh, dividendl, divisorh, divisorl;
  2049. uint32_t w1, w2;
  2050. int ovh, ovl;
  2051. dividendh = *rAh;
  2052. dividendl = *rA;
  2053. divisorh = *rBh;
  2054. divisorl = *rB;
  2055. if (divisorh == 0) {
  2056. w1 = 0xffffffff;
  2057. ovh = 1;
  2058. } else {
  2059. w1 = dividendh / divisorh;
  2060. ovh = 0;
  2061. }
  2062. if (divisorl == 0) {
  2063. w2 = 0xffffffff;
  2064. ovl = 1;
  2065. } else {
  2066. w2 = dividendl / divisorl;
  2067. ovl = 0;
  2068. }
  2069. EV_SET_REG2(*rSh, *rS, w1, w2);
  2070. EV_SET_SPEFSCR_OV(ovl, ovh);
  2071. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK, spr_spefscr);
  2072. #
  2073. # A.2.9 Floating Point SPE Instructions
  2074. #
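#
# The vector single-precision helpers (ev_fs_add, ev_fs_sub, ev_fs_mul,
# ev_fs_div) are defined elsewhere in the simulator; each call names the
# SPEFSCR status bits it may set for that word, and the ...h identifiers
# (spefscr_finvh and friends) appear to select the high-word copies of those
# flags.  The abs/nabs/neg forms below bypass the helpers entirely and just
# manipulate the IEEE single-precision sign bit.
#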
  2075. 0.4,6.RS,11.RA,16.0,21.644:EVX:e500:evfsabs %RS,%RA:Vector Floating-Point Absolute Value
  2076. uint32_t w1, w2;
  2077. w1 = *rAh & 0x7fffffff;
  2078. w2 = *rA & 0x7fffffff;
  2079. EV_SET_REG2(*rSh, *rS, w1, w2);
  2080. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  2081. 0.4,6.RS,11.RA,16.0,21.645:EVX:e500:evfsnabs %RS,%RA:Vector Floating-Point Negative Absolute Value
  2082. uint32_t w1, w2;
  2083. w1 = *rAh | 0x80000000;
  2084. w2 = *rA | 0x80000000;
  2085. EV_SET_REG2(*rSh, *rS, w1, w2);
  2086. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  2087. 0.4,6.RS,11.RA,16.0,21.646:EVX:e500:evfsneg %RS,%RA:Vector Floating-Point Negate
  2088. uint32_t w1, w2;
  2089. w1 = *rAh;
  2090. w2 = *rA;
  2091. w1 = (w1 & 0x7fffffff) | ((~w1) & 0x80000000);
  2092. w2 = (w2 & 0x7fffffff) | ((~w2) & 0x80000000);
  2093. EV_SET_REG2(*rSh, *rS, w1, w2);
  2094. PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
  2095. 0.4,6.RS,11.RA,16.RB,21.640:EVX:e500:evfsadd %RS,%RA,%RB:Vector Floating-Point Add
  2096. uint32_t w1, w2;
  2097. w1 = ev_fs_add (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
1098. w2 = ev_fs_add (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
  2099. EV_SET_REG2(*rSh, *rS, w1, w2);
  2100. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  2101. 0.4,6.RS,11.RA,16.RB,21.641:EVX:e500:evfssub %RS,%RA,%RB:Vector Floating-Point Subtract
  2102. uint32_t w1, w2;
  2103. w1 = ev_fs_sub (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
1104. w2 = ev_fs_sub (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
  2105. EV_SET_REG2(*rSh, *rS, w1, w2);
  2106. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  2107. 0.4,6.RS,11.RA,16.RB,21.648:EVX:e500:evfsmul %RS,%RA,%RB:Vector Floating-Point Multiply
  2108. uint32_t w1, w2;
  2109. w1 = ev_fs_mul (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fgh, spefscr_fxh, processor);
1110. w2 = ev_fs_mul (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fg, spefscr_fx, processor);
  2111. EV_SET_REG2(*rSh, *rS, w1, w2);
  2112. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
  2113. 0.4,6.RS,11.RA,16.RB,21.649:EVX:e500:evfsdiv %RS,%RA,%RB:Vector Floating-Point Divide
  2114. int32_t w1, w2;
  2115. w1 = ev_fs_div (*rAh, *rBh, spefscr_finvh, spefscr_fovfh, spefscr_funfh, spefscr_fdbzh, spefscr_fgh, spefscr_fxh, processor);
  2116. w2 = ev_fs_div (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fdbz, spefscr_fg, spefscr_fx, processor);
  2117. EV_SET_REG2(*rSh, *rS, w1, w2);
  2118. PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
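#
# The compare (evfscmp*) and test (evfstst*) forms compute one result bit per
# word (ch for the high word, cl for the low word) and pack them into the CR
# field as ch, cl, ch|cl, ch&cl from most to least significant bit:
#   w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
# The compare forms additionally raise spefscr_finv / spefscr_finvh when an
# operand is infinite, denormalized or NaN; the test forms skip that check.
#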
  2119. 0.4,6.BF,9./,11.RA,16.RB,21.652:EVX:e500:evfscmpgt %BF,%RA,%RB:Vector Floating-Point Compare Greater Than
  2120. sim_fpu al, ah, bl, bh;
  2121. int w, ch, cl;
  2122. sim_fpu_32to (&al, *rA);
  2123. sim_fpu_32to (&ah, *rAh);
  2124. sim_fpu_32to (&bl, *rB);
  2125. sim_fpu_32to (&bh, *rBh);
  2126. if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
  2127. EV_SET_SPEFSCR_BITS(spefscr_finv);
  2128. if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
  2129. EV_SET_SPEFSCR_BITS(spefscr_finvh);
  2130. if (sim_fpu_is_gt(&ah, &bh))
  2131. ch = 1;
  2132. else
  2133. ch = 0;
  2134. if (sim_fpu_is_gt(&al, &bl))
  2135. cl = 1;
  2136. else
  2137. cl = 0;
  2138. w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
  2139. CR_SET(BF, w);
  2140. PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
  2141. 0.4,6.BF,9./,11.RA,16.RB,21.653:EVX:e500:evfscmplt %BF,%RA,%RB:Vector Floating-Point Compare Less Than
  2142. sim_fpu al, ah, bl, bh;
  2143. int w, ch, cl;
  2144. sim_fpu_32to (&al, *rA);
  2145. sim_fpu_32to (&ah, *rAh);
  2146. sim_fpu_32to (&bl, *rB);
  2147. sim_fpu_32to (&bh, *rBh);
  2148. if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
  2149. EV_SET_SPEFSCR_BITS(spefscr_finv);
  2150. if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
  2151. EV_SET_SPEFSCR_BITS(spefscr_finvh);
  2152. if (sim_fpu_is_lt(&ah, &bh))
  2153. ch = 1;
  2154. else
  2155. ch = 0;
  2156. if (sim_fpu_is_lt(&al, &bl))
  2157. cl = 1;
  2158. else
  2159. cl = 0;
  2160. w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
  2161. CR_SET(BF, w);
  2162. PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
  2163. 0.4,6.BF,9./,11.RA,16.RB,21.654:EVX:e500:evfscmpeq %BF,%RA,%RB:Vector Floating-Point Compare Equal
  2164. sim_fpu al, ah, bl, bh;
  2165. int w, ch, cl;
  2166. sim_fpu_32to (&al, *rA);
  2167. sim_fpu_32to (&ah, *rAh);
  2168. sim_fpu_32to (&bl, *rB);
  2169. sim_fpu_32to (&bh, *rBh);
  2170. if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
  2171. EV_SET_SPEFSCR_BITS(spefscr_finv);
  2172. if (EV_IS_INFDENORMNAN(&ah) || EV_IS_INFDENORMNAN(&bh))
  2173. EV_SET_SPEFSCR_BITS(spefscr_finvh);
  2174. if (sim_fpu_is_eq(&ah, &bh))
  2175. ch = 1;
  2176. else
  2177. ch = 0;
  2178. if (sim_fpu_is_eq(&al, &bl))
  2179. cl = 1;
  2180. else
  2181. cl = 0;
  2182. w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
  2183. CR_SET(BF, w);
  2184. PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
  2185. 0.4,6.BF,9./,11.RA,16.RB,21.668:EVX:e500:evfststgt %BF,%RA,%RB:Vector Floating-Point Test Greater Than
  2186. sim_fpu al, ah, bl, bh;
  2187. int w, ch, cl;
  2188. sim_fpu_32to (&al, *rA);
  2189. sim_fpu_32to (&ah, *rAh);
  2190. sim_fpu_32to (&bl, *rB);
  2191. sim_fpu_32to (&bh, *rBh);
  2192. if (sim_fpu_is_gt(&ah, &bh))
  2193. ch = 1;
  2194. else
  2195. ch = 0;
  2196. if (sim_fpu_is_gt(&al, &bl))
  2197. cl = 1;
  2198. else
  2199. cl = 0;
  2200. w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
  2201. CR_SET(BF, w);
  2202. PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
  2203. 0.4,6.BF,9./,11.RA,16.RB,21.669:EVX:e500:evfststlt %BF,%RA,%RB:Vector Floating-Point Test Less Than
  2204. sim_fpu al, ah, bl, bh;
  2205. int w, ch, cl;
  2206. sim_fpu_32to (&al, *rA);
  2207. sim_fpu_32to (&ah, *rAh);
  2208. sim_fpu_32to (&bl, *rB);
  2209. sim_fpu_32to (&bh, *rBh);
  2210. if (sim_fpu_is_lt(&ah, &bh))
  2211. ch = 1;
  2212. else
  2213. ch = 0;
  2214. if (sim_fpu_is_lt(&al, &bl))
  2215. cl = 1;
  2216. else
  2217. cl = 0;
  2218. w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
  2219. CR_SET(BF, w);
  2220. PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
  2221. 0.4,6.BF,9./,11.RA,16.RB,21.670:EVX:e500:evfststeq %BF,%RA,%RB:Vector Floating-Point Test Equal
  2222. sim_fpu al, ah, bl, bh;
  2223. int w, ch, cl;
  2224. sim_fpu_32to (&al, *rA);
  2225. sim_fpu_32to (&ah, *rAh);
  2226. sim_fpu_32to (&bl, *rB);
  2227. sim_fpu_32to (&bh, *rBh);
  2228. if (sim_fpu_is_eq(&ah, &bh))
  2229. ch = 1;
  2230. else
  2231. ch = 0;
  2232. if (sim_fpu_is_eq(&al, &bl))
  2233. cl = 1;
  2234. else
  2235. cl = 0;
  2236. w = ch << 3 | cl << 2 | (ch | cl) << 1 | (ch & cl);
  2237. CR_SET(BF, w);
  2238. PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
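#
# The conversion forms below are built on the sim_fpu helpers: sim_fpu_32to,
# sim_fpu_u32to, sim_fpu_i32to and sim_fpu_u64to load an operand, while
# sim_fpu_to32, sim_fpu_to32u and sim_fpu_to32i store a result using the
# rounding mode given.  The fraction conversions scale by 2^32 (unsigned, via
# the 0x100000000 constant) or by 2^31 (signed, via 0x80000000).
#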
  2239. 0.4,6.RS,11.0,16.RB,21.656:EVX:e500:evfscfui %RS,%RB:Vector Convert Floating-Point from Unsigned Integer
2240. uint32_t w1, w2;
  2241. sim_fpu b;
  2242. sim_fpu_u32to (&b, *rBh, sim_fpu_round_default);
  2243. sim_fpu_to32 (&w1, &b);
  2244. sim_fpu_u32to (&b, *rB, sim_fpu_round_default);
  2245. sim_fpu_to32 (&w2, &b);
  2246. EV_SET_REG2(*rSh, *rS, w1, w2);
  2247. PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
  2248. 0.4,6.RS,11.0,16.RB,21.664:EVX:e500:evfsctuiz %RS,%RB:Vector Convert Floating-Point to Unsigned Integer with Round toward Zero
  2249. uint32_t w1, w2;
  2250. sim_fpu b;
  2251. sim_fpu_32to (&b, *rBh);
  2252. sim_fpu_to32u (&w1, &b, sim_fpu_round_zero);
  2253. sim_fpu_32to (&b, *rB);
  2254. sim_fpu_to32u (&w2, &b, sim_fpu_round_zero);
  2255. EV_SET_REG2(*rSh, *rS, w1, w2);
  2256. PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
  2257. 0.4,6.RS,11.0,16.RB,21.657:EVX:e500:evfscfsi %RS,%RB:Vector Convert Floating-Point from Signed Integer
  2258. int32_t w1, w2;
2259. sim_fpu b;
  2260. sim_fpu_i32to (&b, *rBh, sim_fpu_round_default);
  2261. sim_fpu_to32 (&w1, &b);
  2262. sim_fpu_i32to (&b, *rB, sim_fpu_round_default);
  2263. sim_fpu_to32 (&w2, &b);
  2264. EV_SET_REG2(*rSh, *rS, w1, w2);
  2265. PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.658:EVX:e500:evfscfuf %RS,%RB:Vector Convert Floating-Point from Unsigned Fraction
	uint32_t w1, w2, bh, bl;
	sim_fpu b, x, y;
	bh = *rBh;
	if (bh == 0xffffffff)
	  sim_fpu_to32 (&w1, &sim_fpu_one);
	else {
	  sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
	  sim_fpu_u32to (&y, bh, sim_fpu_round_default);
	  sim_fpu_div (&b, &y, &x);
	  sim_fpu_to32 (&w1, &b);
	}
	bl = *rB;
	if (bl == 0xffffffff)
	  sim_fpu_to32 (&w2, &sim_fpu_one);
	else {
	  sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
	  sim_fpu_u32to (&y, bl, sim_fpu_round_default);
	  sim_fpu_div (&b, &y, &x);
	  sim_fpu_to32 (&w2, &b);
	}
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.659:EVX:e500:evfscfsf %RS,%RB:Vector Convert Floating-Point from Signed Fraction
	uint32_t w1, w2;
	sim_fpu b, x, y;
	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
	sim_fpu_i32to (&y, *rBh, sim_fpu_round_default);
	sim_fpu_div (&b, &y, &x);
	sim_fpu_to32 (&w1, &b);
	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
	sim_fpu_i32to (&y, *rB, sim_fpu_round_default);
	sim_fpu_div (&b, &y, &x);
	sim_fpu_to32 (&w2, &b);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.660:EVX:e500:evfsctui %RS,%RB:Vector Convert Floating-Point to Unsigned Integer
	uint32_t w1, w2;
	sim_fpu b;
	sim_fpu_32to (&b, *rBh);
	sim_fpu_to32u (&w1, &b, sim_fpu_round_default);
	sim_fpu_32to (&b, *rB);
	sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.661:EVX:e500:evfsctsi %RS,%RB:Vector Convert Floating-Point to Signed Integer
	int32_t w1, w2;
	sim_fpu b;
	sim_fpu_32to (&b, *rBh);
	sim_fpu_to32i (&w1, &b, sim_fpu_round_default);
	sim_fpu_32to (&b, *rB);
	sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.666:EVX:e500:evfsctsiz %RS,%RB:Vector Convert Floating-Point to Signed Integer with Round toward Zero
	int32_t w1, w2;
	sim_fpu b;
	sim_fpu_32to (&b, *rBh);
	sim_fpu_to32i (&w1, &b, sim_fpu_round_zero);
	sim_fpu_32to (&b, *rB);
	sim_fpu_to32i (&w2, &b, sim_fpu_round_zero);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.662:EVX:e500:evfsctuf %RS,%RB:Vector Convert Floating-Point to Unsigned Fraction
	uint32_t w1, w2;
	sim_fpu b, x, y;
	sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
	sim_fpu_32to (&y, *rBh);
	sim_fpu_mul (&b, &y, &x);
	sim_fpu_to32u (&w1, &b, sim_fpu_round_default);
	sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
	sim_fpu_32to (&y, *rB);
	sim_fpu_mul (&b, &y, &x);
	sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.663:EVX:e500:evfsctsf %RS,%RB:Vector Convert Floating-Point to Signed Fraction
	int32_t w1, w2;
	sim_fpu b, x, y;
	sim_fpu_32to (&y, *rBh);
	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
	sim_fpu_mul (&b, &y, &x);
	sim_fpu_to32i (&w1, &b, sim_fpu_round_near);
	sim_fpu_32to (&y, *rB);
	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
	sim_fpu_mul (&b, &y, &x);
	sim_fpu_to32i (&w2, &b, sim_fpu_round_near);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
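#
# The efs* forms below are the scalar single-precision counterparts of
# the evfs* vector forms above: they operate only on the low word of the
# 64-bit register and leave the high word of RS unchanged.
#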
0.4,6.RS,11.RA,16.0,21.708:EVX:e500:efsabs %RS,%RA:Floating-Point Absolute Value
	uint32_t w1, w2;
	w1 = *rSh;
	w2 = *rA & 0x7fffffff;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.709:EVX:e500:efsnabs %RS,%RA:Floating-Point Negative Absolute Value
	uint32_t w1, w2;
	w1 = *rSh;
	w2 = *rA | 0x80000000;
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.0,21.710:EVX:e500:efsneg %RS,%RA:Floating-Point Negate
	uint32_t w1, w2;
	w1 = *rSh;
	w2 = (*rA & 0x7fffffff) | ((~*rA) & 0x80000000);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK, 0);
0.4,6.RS,11.RA,16.RB,21.704:EVX:e500:efsadd %RS,%RA,%RB:Floating-Point Add
	uint32_t w;
	w = ev_fs_add (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
	EV_SET_REG(*rS, w);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.705:EVX:e500:efssub %RS,%RA,%RB:Floating-Point Subtract
	uint32_t w;
	w = ev_fs_sub (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
	EV_SET_REG(*rS, w);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.712:EVX:e500:efsmul %RS,%RA,%RB:Floating-Point Multiply
	uint32_t w;
	w = ev_fs_mul (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fgh, spefscr_fxh, processor);
	EV_SET_REG(*rS, w);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.RS,11.RA,16.RB,21.713:EVX:e500:efsdiv %RS,%RA,%RB:Floating-Point Divide
	uint32_t w;
	w = ev_fs_div (*rA, *rB, spefscr_finv, spefscr_fovf, spefscr_funf, spefscr_fdbz, spefscr_fg, spefscr_fx, processor);
	EV_SET_REG(*rS, w);
	PPC_INSN_INT_SPR(RS_BITMASK, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.BF,9./,11.RA,16.RB,21.716:EVX:e500:efscmpgt %BF,%RA,%RB:Floating-Point Compare Greater Than
	sim_fpu a, b;
	int w, cl;
	sim_fpu_32to (&a, *rA);
	sim_fpu_32to (&b, *rB);
	if (EV_IS_INFDENORMNAN(&a) || EV_IS_INFDENORMNAN(&b))
	  EV_SET_SPEFSCR_BITS(spefscr_finv);
	if (sim_fpu_is_gt(&a, &b))
	  cl = 1;
	else
	  cl = 0;
	w = cl << 2 | cl << 1;
	CR_SET(BF, w);
	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.BF,9./,11.RA,16.RB,21.717:EVX:e500:efscmplt %BF,%RA,%RB:Floating-Point Compare Less Than
	sim_fpu al, bl;
	int w, cl;
	sim_fpu_32to (&al, *rA);
	sim_fpu_32to (&bl, *rB);
	if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
	  EV_SET_SPEFSCR_BITS(spefscr_finv);
	if (sim_fpu_is_lt(&al, &bl))
	  cl = 1;
	else
	  cl = 0;
	w = cl << 2 | cl << 1;
	CR_SET(BF, w);
	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
0.4,6.BF,9./,11.RA,16.RB,21.718:EVX:e500:efscmpeq %BF,%RA,%RB:Floating-Point Compare Equal
	sim_fpu al, bl;
	int w, cl;
	sim_fpu_32to (&al, *rA);
	sim_fpu_32to (&bl, *rB);
	if (EV_IS_INFDENORMNAN(&al) || EV_IS_INFDENORMNAN(&bl))
	  EV_SET_SPEFSCR_BITS(spefscr_finv);
	if (sim_fpu_is_eq(&al, &bl))
	  cl = 1;
	else
	  cl = 0;
	w = cl << 2 | cl << 1;
	CR_SET(BF, w);
	PPC_INSN_INT_SPR(0, RA_BITMASK | RB_BITMASK, spr_spefscr);
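#
# The efstst* forms below compute the same comparison results as the
# efscmp* forms but skip the SPEFSCR invalid-input check for infinities,
# denormals and NaNs, and so do not touch the SPEFSCR.
#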
0.4,6.BF,9./,11.RA,16.RB,21.732:EVX:e500:efststgt %BF,%RA,%RB:Floating-Point Test Greater Than
	sim_fpu al, bl;
	int w, cl;
	sim_fpu_32to (&al, *rA);
	sim_fpu_32to (&bl, *rB);
	if (sim_fpu_is_gt(&al, &bl))
	  cl = 1;
	else
	  cl = 0;
	w = cl << 2 | cl << 1;
	CR_SET(BF, w);
	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
0.4,6.BF,9./,11.RA,16.RB,21.733:EVX:e500:efststlt %BF,%RA,%RB:Floating-Point Test Less Than
	sim_fpu al, bl;
	int w, cl;
	sim_fpu_32to (&al, *rA);
	sim_fpu_32to (&bl, *rB);
	if (sim_fpu_is_lt(&al, &bl))
	  cl = 1;
	else
	  cl = 0;
	w = cl << 2 | cl << 1;
	CR_SET(BF, w);
	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
0.4,6.BF,9./,11.RA,16.RB,21.734:EVX:e500:efststeq %BF,%RA,%RB:Floating-Point Test Equal
	sim_fpu al, bl;
	int w, cl;
	sim_fpu_32to (&al, *rA);
	sim_fpu_32to (&bl, *rB);
	if (sim_fpu_is_eq(&al, &bl))
	  cl = 1;
	else
	  cl = 0;
	w = cl << 2 | cl << 1;
	CR_SET(BF, w);
	PPC_INSN_INT_CR(0, RA_BITMASK | RB_BITMASK, BF_BITMASK);
0.4,6.RS,11.0,16.RB,21.721:EVX:e500:efscfsi %RS,%RB:Convert Floating-Point from Signed Integer
	int32_t w1, w2;
	sim_fpu b;
	w1 = *rSh;
	sim_fpu_i32to (&b, *rB, sim_fpu_round_default);
	sim_fpu_to32 (&w2, &b);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.720:EVX:e500:efscfui %RS,%RB:Convert Floating-Point from Unsigned Integer
	uint32_t w1, w2;
	sim_fpu b;
	w1 = *rSh;
	sim_fpu_u32to (&b, *rB, sim_fpu_round_default);
	sim_fpu_to32 (&w2, &b);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.723:EVX:e500:efscfsf %RS,%RB:Convert Floating-Point from Signed Fraction
	uint32_t w1, w2;
	sim_fpu b, x, y;
	w1 = *rSh;
	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
	sim_fpu_i32to (&y, *rB, sim_fpu_round_default);
	sim_fpu_div (&b, &y, &x);
	sim_fpu_to32 (&w2, &b);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.722:EVX:e500:efscfuf %RS,%RB:Convert Floating-Point from Unsigned Fraction
	uint32_t w1, w2, bl;
	sim_fpu b, x, y;
	w1 = *rSh;
	bl = *rB;
	if (bl == 0xffffffff)
	  sim_fpu_to32 (&w2, &sim_fpu_one);
	else {
	  sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
	  sim_fpu_u32to (&y, bl, sim_fpu_round_default);
	  sim_fpu_div (&b, &y, &x);
	  sim_fpu_to32 (&w2, &b);
	}
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.725:EVX:e500:efsctsi %RS,%RB:Convert Floating-Point to Signed Integer
	int32_t w1, w2;
	sim_fpu b;
	w1 = *rSh;
	sim_fpu_32to (&b, *rB);
	sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.730:EVX:e500:efsctsiz %RS,%RB:Convert Floating-Point to Signed Integer with Round toward Zero
	int32_t w1, w2;
	sim_fpu b;
	w1 = *rSh;
	sim_fpu_32to (&b, *rB);
	sim_fpu_to32i (&w2, &b, sim_fpu_round_zero);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.724:EVX:e500:efsctui %RS,%RB:Convert Floating-Point to Unsigned Integer
	uint32_t w1, w2;
	sim_fpu b;
	w1 = *rSh;
	sim_fpu_32to (&b, *rB);
	sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.728:EVX:e500:efsctuiz %RS,%RB:Convert Floating-Point to Unsigned Integer with Round toward Zero
	uint32_t w1, w2;
	sim_fpu b;
	w1 = *rSh;
	sim_fpu_32to (&b, *rB);
	sim_fpu_to32u (&w2, &b, sim_fpu_round_zero);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.727:EVX:e500:efsctsf %RS,%RB:Convert Floating-Point to Signed Fraction
	int32_t w1, w2;
	sim_fpu b, x, y;
	w1 = *rSh;
	sim_fpu_32to (&y, *rB);
	sim_fpu_u32to (&x, 0x80000000, sim_fpu_round_default);
	sim_fpu_mul (&b, &y, &x);
	sim_fpu_to32i (&w2, &b, sim_fpu_round_default);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
0.4,6.RS,11.0,16.RB,21.726:EVX:e500:efsctuf %RS,%RB:Convert Floating-Point to Unsigned Fraction
	uint32_t w1, w2;
	sim_fpu b, x, y;
	w1 = *rSh;
	sim_fpu_u64to (&x, 0x100000000, sim_fpu_round_default);
	sim_fpu_32to (&y, *rB);
	sim_fpu_mul (&b, &y, &x);
	sim_fpu_to32u (&w2, &b, sim_fpu_round_default);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, RB_BITMASK, 0);
#
# A.2.10 Vector Load/Store Instructions
#
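# All of the load/store forms below compute the effective address as
# (RA|0) + offset: the base is zero when RA is register 0 and the
# contents of RA otherwise.  The immediate forms scale UIMM by the access
# size (<< 3 for double words, << 2 for words, << 1 for half words); the
# indexed (*x) forms add RB instead.  For example, evldd with UIMM=2
# loads the double word at (RA|0) + 16.
#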
0.4,6.RS,11.RA,16.UIMM,21.769:EVX:e500:evldd %RS,%RA,%UIMM:Vector Load Double Word into Double Word
	uint64_t m;
	unsigned_word b;
	unsigned_word EA;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 3);
	m = MEM(unsigned, EA, 8);
	EV_SET_REG1(*rSh, *rS, m);
	//printf("evldd(%d<-%d + %u): m %08x.%08x, *rSh %x *rS %x\n", RS, RA, UIMM, (int)(m >> 32), (int)m, *rSh, *rS);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.768:EVX:e500:evlddx %RS,%RA,%RB:Vector Load Double Word into Double Word Indexed
	uint64_t m;
	unsigned_word b;
	unsigned_word EA;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	m = MEM(unsigned, EA, 8);
	EV_SET_REG1(*rSh, *rS, m);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.771:EVX:e500:evldw %RS,%RA,%UIMM:Vector Load Double into Two Words
	unsigned_word b;
	unsigned_word EA;
	uint32_t w1, w2;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 3);
	w1 = MEM(unsigned, EA, 4);
	w2 = MEM(unsigned, EA + 4, 4);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.770:EVX:e500:evldwx %RS,%RA,%RB:Vector Load Double into Two Words Indexed
	unsigned_word b;
	unsigned_word EA;
	uint32_t w1, w2;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	w1 = MEM(unsigned, EA, 4);
	w2 = MEM(unsigned, EA + 4, 4);
	EV_SET_REG2(*rSh, *rS, w1, w2);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.773:EVX:e500:evldh %RS,%RA,%UIMM:Vector Load Double into 4 Half Words
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2, h3, h4;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 3);
	h1 = MEM(unsigned, EA, 2);
	h2 = MEM(unsigned, EA + 2, 2);
	h3 = MEM(unsigned, EA + 4, 2);
	h4 = MEM(unsigned, EA + 6, 2);
	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.772:EVX:e500:evldhx %RS,%RA,%RB:Vector Load Double into 4 Half Words Indexed
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2, h3, h4;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	h1 = MEM(unsigned, EA, 2);
	h2 = MEM(unsigned, EA + 2, 2);
	h3 = MEM(unsigned, EA + 4, 2);
	h4 = MEM(unsigned, EA + 6, 2);
	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.785:EVX:e500:evlwhe %RS,%RA,%UIMM:Vector Load Word into Two Half Words Even
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2, h3, h4;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 2);
	h1 = MEM(unsigned, EA, 2);
	h2 = 0;
	h3 = MEM(unsigned, EA + 2, 2);
	h4 = 0;
	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.784:EVX:e500:evlwhex %RS,%RA,%RB:Vector Load Word into Two Half Words Even Indexed
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2, h3, h4;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	h1 = MEM(unsigned, EA, 2);
	h2 = 0;
	h3 = MEM(unsigned, EA + 2, 2);
	h4 = 0;
	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.789:EVX:e500:evlwhou %RS,%RA,%UIMM:Vector Load Word into Two Half Words Odd Unsigned zero-extended
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2, h3, h4;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 2);
	h1 = 0;
	h2 = MEM(unsigned, EA, 2);
	h3 = 0;
	h4 = MEM(unsigned, EA + 2, 2);
	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.788:EVX:e500:evlwhoux %RS,%RA,%RB:Vector Load Word into Two Half Words Odd Unsigned Indexed zero-extended
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2, h3, h4;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	h1 = 0;
	h2 = MEM(unsigned, EA, 2);
	h3 = 0;
	h4 = MEM(unsigned, EA + 2, 2);
	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.791:EVX:e500:evlwhos %RS,%RA,%UIMM:Vector Load Word into Half Words Odd Signed with sign extension
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2, h3, h4;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 2);
	h2 = MEM(unsigned, EA, 2);
	if (h2 & 0x8000)
	  h1 = 0xffff;
	else
	  h1 = 0;
	h4 = MEM(unsigned, EA + 2, 2);
	if (h4 & 0x8000)
	  h3 = 0xffff;
	else
	  h3 = 0;
	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.790:EVX:e500:evlwhosx %RS,%RA,%RB:Vector Load Word into Half Words Odd Signed Indexed with sign extension
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2, h3, h4;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	h2 = MEM(unsigned, EA, 2);
	if (h2 & 0x8000)
	  h1 = 0xffff;
	else
	  h1 = 0;
	h4 = MEM(unsigned, EA + 2, 2);
	if (h4 & 0x8000)
	  h3 = 0xffff;
	else
	  h3 = 0;
	EV_SET_REG4(*rSh, *rS, h1, h2, h3, h4);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
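#
# The *splat load forms below replicate the fetched word or half word
# across the corresponding element positions of RS.
#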
0.4,6.RS,11.RA,16.UIMM,21.793:EVX:e500:evlwwsplat %RS,%RA,%UIMM:Vector Load Word into Word and Splat
	unsigned_word b;
	unsigned_word EA;
	uint32_t w1;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 2);
	w1 = MEM(unsigned, EA, 4);
	EV_SET_REG2(*rSh, *rS, w1, w1);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.792:EVX:e500:evlwwsplatx %RS,%RA,%RB:Vector Load Word into Word and Splat Indexed
	unsigned_word b;
	unsigned_word EA;
	uint32_t w1;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	w1 = MEM(unsigned, EA, 4);
	EV_SET_REG2(*rSh, *rS, w1, w1);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.797:EVX:e500:evlwhsplat %RS,%RA,%UIMM:Vector Load Word into 2 Half Words and Splat
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 2);
	h1 = MEM(unsigned, EA, 2);
	h2 = MEM(unsigned, EA + 2, 2);
	EV_SET_REG4(*rSh, *rS, h1, h1, h2, h2);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.796:EVX:e500:evlwhsplatx %RS,%RA,%RB:Vector Load Word into 2 Half Words and Splat Indexed
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	h1 = MEM(unsigned, EA, 2);
	h2 = MEM(unsigned, EA + 2, 2);
	EV_SET_REG4(*rSh, *rS, h1, h1, h2, h2);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.777:EVX:e500:evlhhesplat %RS,%RA,%UIMM:Vector Load Half Word into Half Words Even and Splat
	unsigned_word b;
	unsigned_word EA;
	uint16_t h;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 1);
	h = MEM(unsigned, EA, 2);
	EV_SET_REG4(*rSh, *rS, h, 0, h, 0);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.776:EVX:e500:evlhhesplatx %RS,%RA,%RB:Vector Load Half Word into Half Words Even and Splat Indexed
	unsigned_word b;
	unsigned_word EA;
	uint16_t h;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	h = MEM(unsigned, EA, 2);
	EV_SET_REG4(*rSh, *rS, h, 0, h, 0);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.781:EVX:e500:evlhhousplat %RS,%RA,%UIMM:Vector Load Half Word into Half Word Odd Unsigned and Splat
	unsigned_word b;
	unsigned_word EA;
	uint16_t h;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 1);
	h = MEM(unsigned, EA, 2);
	EV_SET_REG4(*rSh, *rS, 0, h, 0, h);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.780:EVX:e500:evlhhousplatx %RS,%RA,%RB:Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed
	unsigned_word b;
	unsigned_word EA;
	uint16_t h;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	h = MEM(unsigned, EA, 2);
	EV_SET_REG4(*rSh, *rS, 0, h, 0, h);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.783:EVX:e500:evlhhossplat %RS,%RA,%UIMM:Vector Load Half Word into Half Word Odd Signed and Splat
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 1);
	h2 = MEM(unsigned, EA, 2);
	if (h2 & 0x8000)
	  h1 = 0xffff;
	else
	  h1 = 0;
	EV_SET_REG4(*rSh, *rS, h1, h2, h1, h2);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.782:EVX:e500:evlhhossplatx %RS,%RA,%RB:Vector Load Half Word into Half Word Odd Signed and Splat Indexed
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	h2 = MEM(unsigned, EA, 2);
	if (h2 & 0x8000)
	  h1 = 0xffff;
	else
	  h1 = 0;
	EV_SET_REG4(*rSh, *rS, h1, h2, h1, h2);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
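#
# The evst* store forms below mirror the loads above: the effective
# address is formed the same way, and the selected words or half words of
# RS are written to memory.
#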
0.4,6.RS,11.RA,16.UIMM,21.801:EVX:e500:evstdd %RS,%RA,%UIMM:Vector Store Double of Double
	unsigned_word b;
	unsigned_word EA;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 3);
	STORE(EA, 4, (*rSh));
	STORE(EA + 4, 4, (*rS));
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.800:EVX:e500:evstddx %RS,%RA,%RB:Vector Store Double of Double Indexed
	unsigned_word b;
	unsigned_word EA;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	STORE(EA, 4, (*rSh));
	STORE(EA + 4, 4, (*rS));
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.803:EVX:e500:evstdw %RS,%RA,%UIMM:Vector Store Double of Two Words
	unsigned_word b;
	unsigned_word EA;
	uint32_t w1, w2;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 3);
	w1 = *rSh;
	w2 = *rS;
	STORE(EA + 0, 4, w1);
	STORE(EA + 4, 4, w2);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.802:EVX:e500:evstdwx %RS,%RA,%RB:Vector Store Double of Two Words Indexed
	unsigned_word b;
	unsigned_word EA;
	uint32_t w1, w2;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	w1 = *rSh;
	w2 = *rS;
	STORE(EA + 0, 4, w1);
	STORE(EA + 4, 4, w2);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.805:EVX:e500:evstdh %RS,%RA,%UIMM:Vector Store Double of Four Half Words
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2, h3, h4;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 3);
	h1 = EV_HIHALF(*rSh);
	h2 = EV_LOHALF(*rSh);
	h3 = EV_HIHALF(*rS);
	h4 = EV_LOHALF(*rS);
	STORE(EA + 0, 2, h1);
	STORE(EA + 2, 2, h2);
	STORE(EA + 4, 2, h3);
	STORE(EA + 6, 2, h4);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.804:EVX:e500:evstdhx %RS,%RA,%RB:Vector Store Double of Four Half Words Indexed
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2, h3, h4;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	h1 = EV_HIHALF(*rSh);
	h2 = EV_LOHALF(*rSh);
	h3 = EV_HIHALF(*rS);
	h4 = EV_LOHALF(*rS);
	STORE(EA + 0, 2, h1);
	STORE(EA + 2, 2, h2);
	STORE(EA + 4, 2, h3);
	STORE(EA + 6, 2, h4);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.825:EVX:e500:evstwwe %RS,%RA,%UIMM:Vector Store Word of Word from Even
	unsigned_word b;
	unsigned_word EA;
	uint32_t w;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 2);
	w = *rSh;
	STORE(EA, 4, w);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.824:EVX:e500:evstwwex %RS,%RA,%RB:Vector Store Word of Word from Even Indexed
	unsigned_word b;
	unsigned_word EA;
	uint32_t w;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	w = *rSh;
	STORE(EA, 4, w);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.829:EVX:e500:evstwwo %RS,%RA,%UIMM:Vector Store Word of Word from Odd
	unsigned_word b;
	unsigned_word EA;
	uint32_t w;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 2);
	w = *rS;
	STORE(EA, 4, w);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.828:EVX:e500:evstwwox %RS,%RA,%RB:Vector Store Word of Word from Odd Indexed
	unsigned_word b;
	unsigned_word EA;
	uint32_t w;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	w = *rS;
	STORE(EA, 4, w);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.817:EVX:e500:evstwhe %RS,%RA,%UIMM:Vector Store Word of Two Half Words from Even
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 2);
	h1 = EV_HIHALF(*rSh);
	h2 = EV_HIHALF(*rS);
	STORE(EA + 0, 2, h1);
	STORE(EA + 2, 2, h2);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.816:EVX:e500:evstwhex %RS,%RA,%RB:Vector Store Word of Two Half Words from Even Indexed
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	h1 = EV_HIHALF(*rSh);
	h2 = EV_HIHALF(*rS);
	STORE(EA + 0, 2, h1);
	STORE(EA + 2, 2, h2);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
0.4,6.RS,11.RA,16.UIMM,21.821:EVX:e500:evstwho %RS,%RA,%UIMM:Vector Store Word of Two Half Words from Odd
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + (UIMM << 2);
	h1 = EV_LOHALF(*rSh);
	h2 = EV_LOHALF(*rS);
	STORE(EA + 0, 2, h1);
	STORE(EA + 2, 2, h2);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1), 0);
0.4,6.RS,11.RA,16.RB,21.820:EVX:e500:evstwhox %RS,%RA,%RB:Vector Store Word of Two Half Words from Odd Indexed
	unsigned_word b;
	unsigned_word EA;
	uint16_t h1, h2;
	if (RA_is_0) b = 0;
	else b = *rA;
	EA = b + *rB;
	h1 = EV_LOHALF(*rSh);
	h2 = EV_LOHALF(*rS);
	STORE(EA + 0, 2, h1);
	STORE(EA + 2, 2, h2);
	PPC_INSN_INT(RS_BITMASK, (RA_BITMASK & ~1) | RB_BITMASK, 0);
#
# 4.5.1 Integer Select Instruction
#
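# isel copies (RA|0) into RS when CR bit CRB is set, and copies RB
# otherwise.
#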
0.31,6.RS,11.RA,16.RB,21.CRB,26.30:X:e500:isel %RS,%RA,%RB,%CRB:Integer Select
	if (CR & (1 << (31 - (unsigned)CRB)))
	  if (RA_is_0)
	    EV_SET_REG1(*rSh, *rS, 0);
	  else
	    EV_SET_REG2(*rSh, *rS, *rAh, *rA);
	else
	  EV_SET_REG2(*rSh, *rS, *rBh, *rB);
	PPC_INSN_INT(RS_BITMASK, RA_BITMASK | RB_BITMASK, 0);