12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359 |
- # Altivec instruction set, for PSIM, the PowerPC simulator.
- # Copyright 2003-2022 Free Software Foundation, Inc.
- # Contributed by Red Hat Inc; developed under contract from Motorola.
- # Written by matthew green <mrg@redhat.com>.
- # This file is part of GDB.
- # This program is free software; you can redistribute it and/or modify
- # it under the terms of the GNU General Public License as published by
- # the Free Software Foundation; either version 3 of the License, or
- # (at your option) any later version.
- # This program is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- # GNU General Public License for more details.
- # You should have received a copy of the GNU General Public License
- # along with this program. If not, see <http://www.gnu.org/licenses/>. */
- #
- # Motorola AltiVec instructions.
- #
- # Operand cache entries: for each vector register field (VS, VA, VB, VC)
- # expose the raw field value, a vreg pointer into the AltiVec register
- # file, and a one-hot bitmask used for scheduling/dependency tracking.
- :cache:av:::VS:VS:
- :cache:av::vreg *:vS:VS:(cpu_registers(processor)->altivec.vr + VS)
- :cache:av::uint32_t:VS_BITMASK:VS:(1 << VS)
- :cache:av:::VA:VA:
- :cache:av::vreg *:vA:VA:(cpu_registers(processor)->altivec.vr + VA)
- :cache:av::uint32_t:VA_BITMASK:VA:(1 << VA)
- :cache:av:::VB:VB:
- :cache:av::vreg *:vB:VB:(cpu_registers(processor)->altivec.vr + VB)
- :cache:av::uint32_t:VB_BITMASK:VB:(1 << VB)
- :cache:av:::VC:VC:
- :cache:av::vreg *:vC:VC:(cpu_registers(processor)->altivec.vr + VC)
- :cache:av::uint32_t:VC_BITMASK:VC:(1 << VC)
- # Flags for model.h
- # Scheduling hooks emitted into model.h: each wrapper forwards this
- # instruction's register read/write masks to the timing model when
- # modelling is enabled (CURRENT_MODEL_ISSUE > 0) and is a no-op otherwise.
- ::model-macro:::
- #define PPC_INSN_INT_VR(OUT_MASK, IN_MASK, OUT_VMASK, IN_VMASK) \
- do { \
- if (CURRENT_MODEL_ISSUE > 0) \
- ppc_insn_int_vr(MY_INDEX, cpu_model(processor), OUT_MASK, IN_MASK, OUT_VMASK, IN_VMASK); \
- } while (0)
- #define PPC_INSN_VR(OUT_VMASK, IN_VMASK) \
- do { \
- if (CURRENT_MODEL_ISSUE > 0) \
- ppc_insn_vr(MY_INDEX, cpu_model(processor), OUT_VMASK, IN_VMASK); \
- } while (0)
- #define PPC_INSN_VR_CR(OUT_VMASK, IN_VMASK, CR_MASK) \
- do { \
- if (CURRENT_MODEL_ISSUE > 0) \
- ppc_insn_vr_cr(MY_INDEX, cpu_model(processor), OUT_VMASK, IN_VMASK, CR_MASK); \
- } while (0)
- #define PPC_INSN_VR_VSCR(OUT_VMASK, IN_VMASK) \
- do { \
- if (CURRENT_MODEL_ISSUE > 0) \
- ppc_insn_vr_vscr(MY_INDEX, cpu_model(processor), OUT_VMASK, IN_VMASK); \
- } while (0)
- #define PPC_INSN_FROM_VSCR(VR_MASK) \
- do { \
- if (CURRENT_MODEL_ISSUE > 0) \
- ppc_insn_from_vscr(MY_INDEX, cpu_model(processor), VR_MASK); \
- } while (0)
- #define PPC_INSN_TO_VSCR(VR_MASK) \
- do { \
- if (CURRENT_MODEL_ISSUE > 0) \
- ppc_insn_to_vscr(MY_INDEX, cpu_model(processor), VR_MASK); \
- } while (0)
- # Trace waiting for AltiVec registers to become available
- void::model-static::model_trace_altivec_busy_p:model_data *model_ptr, uint32_t vr_busy
- int i;
- /* Report each requested vector register that is still in flight.  */
- if (vr_busy) {
- vr_busy &= model_ptr->vr_busy;
- for(i = 0; i < 32; i++) {
- if (((1 << i) & vr_busy) != 0) {
- TRACE(trace_model, ("Waiting for register v%d.\n", i));
- }
- }
- }
- if (model_ptr->vscr_busy)
- TRACE(trace_model, ("Waiting for VSCR\n"));
- # Trace making AltiVec registers busy
- void::model-static::model_trace_altivec_make_busy:model_data *model_ptr, uint32_t vr_mask, uint32_t cr_mask
- int i;
- /* Announce each vector register and CR field this instruction has just
-    claimed for the duration of its execution.  */
- if (vr_mask) {
- for(i = 0; i < 32; i++) {
- if (((1 << i) & vr_mask) != 0) {
- TRACE(trace_model, ("Register v%d is now busy.\n", i));
- }
- }
- }
- if (cr_mask) {
- for(i = 0; i < 8; i++) {
- if (((1 << i) & cr_mask) != 0) {
- TRACE(trace_model, ("Register cr%d is now busy.\n", i));
- }
- }
- }
- # Schedule an AltiVec instruction that takes integer input registers and produces output registers
- void::model-function::ppc_insn_int_vr:itable_index index, model_data *model_ptr, const uint32_t out_mask, const uint32_t in_mask, const uint32_t out_vmask, const uint32_t in_vmask
- const uint32_t int_mask = out_mask | in_mask;
- const uint32_t vr_mask = out_vmask | in_vmask;
- model_busy *busy_ptr;
- /* Stall until every GPR and VR this insn touches is free.  */
- if ((model_ptr->int_busy & int_mask) != 0 || (model_ptr->vr_busy & vr_mask)) {
- model_new_cycle(model_ptr); /* don't count first dependency as a stall */
- while ((model_ptr->int_busy & int_mask) != 0 || (model_ptr->vr_busy & vr_mask)) {
- if (WITH_TRACE && ppc_trace[trace_model]) {
- model_trace_busy_p(model_ptr, int_mask, 0, 0, PPC_NO_SPR);
- model_trace_altivec_busy_p(model_ptr, vr_mask);
- }
- model_ptr->nr_stalls_data++;
- model_new_cycle(model_ptr);
- }
- }
- /* Claim a function unit, then mark outputs busy until writeback.  */
- busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
- model_ptr->int_busy |= out_mask;
- busy_ptr->int_busy |= out_mask;
- model_ptr->vr_busy |= out_vmask;
- busy_ptr->vr_busy |= out_vmask;
- /* Writeback slots for the GPR destinations must be counted from
-    out_mask; the original tested PPC_ONE_BIT_SET_P(out_vmask) here,
-    which miscounts whenever the two masks differ.  */
- if (out_mask)
- busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_mask)) ? 1 : 2;
- if (out_vmask)
- busy_ptr->nr_writebacks += (PPC_ONE_BIT_SET_P(out_vmask)) ? 1 : 2;
- if (WITH_TRACE && ppc_trace[trace_model]) {
- model_trace_make_busy(model_ptr, out_mask, 0, 0);
- model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
- }
- # Schedule an AltiVec instruction that takes vector input registers and produces vector output registers
- void::model-function::ppc_insn_vr:itable_index index, model_data *model_ptr, const uint32_t out_vmask, const uint32_t in_vmask
- const uint32_t vr_mask = out_vmask | in_vmask;
- model_busy *busy_ptr;
- /* Stall until all source and destination VRs are free.  */
- if (model_ptr->vr_busy & vr_mask) {
- model_new_cycle(model_ptr); /* don't count first dependency as a stall */
- while (model_ptr->vr_busy & vr_mask) {
- if (WITH_TRACE && ppc_trace[trace_model]) {
- model_trace_altivec_busy_p(model_ptr, vr_mask);
- }
- model_ptr->nr_stalls_data++;
- model_new_cycle(model_ptr);
- }
- }
- /* Claim a function unit, then mark the outputs busy until writeback.  */
- busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
- model_ptr->vr_busy |= out_vmask;
- busy_ptr->vr_busy |= out_vmask;
- if (out_vmask)
- busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_vmask)) ? 1 : 2;
- if (WITH_TRACE && ppc_trace[trace_model]) {
- model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
- }
- # Schedule an AltiVec instruction that takes vector input registers and produces vector output registers, touches CR
- void::model-function::ppc_insn_vr_cr:itable_index index, model_data *model_ptr, const uint32_t out_vmask, const uint32_t in_vmask, const uint32_t cr_mask
- const uint32_t vr_mask = out_vmask | in_vmask;
- model_busy *busy_ptr;
- /* Stall until the VRs and the CR fields this insn touches are free.  */
- if ((model_ptr->vr_busy & vr_mask) || (model_ptr->cr_fpscr_busy & cr_mask)) {
- model_new_cycle(model_ptr); /* don't count first dependency as a stall */
- while ((model_ptr->vr_busy & vr_mask) || (model_ptr->cr_fpscr_busy & cr_mask)) {
- if (WITH_TRACE && ppc_trace[trace_model]) {
- model_trace_busy_p(model_ptr, 0, 0, cr_mask, PPC_NO_SPR);
- model_trace_altivec_busy_p(model_ptr, vr_mask);
- }
- model_ptr->nr_stalls_data++;
- model_new_cycle(model_ptr);
- }
- }
- busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
- model_ptr->cr_fpscr_busy |= cr_mask;
- busy_ptr->cr_fpscr_busy |= cr_mask;
- model_ptr->vr_busy |= out_vmask;
- busy_ptr->vr_busy |= out_vmask;
- /* One or two VR writeback slots, plus one more if CR is written.  */
- if (out_vmask)
- busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_vmask)) ? 1 : 2;
- if (cr_mask)
- busy_ptr->nr_writebacks++;
- if (WITH_TRACE && ppc_trace[trace_model])
- model_trace_altivec_make_busy(model_ptr, vr_mask, cr_mask);
- # Schedule an AltiVec instruction that takes vector input registers and produces vector output registers, touches VSCR
- void::model-function::ppc_insn_vr_vscr:itable_index index, model_data *model_ptr, const uint32_t out_vmask, const uint32_t in_vmask
- const uint32_t vr_mask = out_vmask | in_vmask;
- model_busy *busy_ptr;
- /* Stall until the VRs involved and the VSCR are free.  */
- if ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
- model_new_cycle(model_ptr); /* don't count first dependency as a stall */
- while ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
- if (WITH_TRACE && ppc_trace[trace_model])
- model_trace_altivec_busy_p(model_ptr, vr_mask);
- model_ptr->nr_stalls_data++;
- model_new_cycle(model_ptr);
- }
- }
- busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
- model_ptr->vr_busy |= out_vmask;
- busy_ptr->vr_busy |= out_vmask;
- model_ptr->vscr_busy = 1;
- busy_ptr->vscr_busy = 1;
- /* One writeback for the VSCR plus 1 (single VR) or 2 (several VRs).
-    The original wrote "1 + P ? 1 : 2": because ?: binds looser than +,
-    that parses as "(1 + P) ? 1 : 2" and always yielded 1.  */
- if (out_vmask)
- busy_ptr->nr_writebacks = 1 + ((PPC_ONE_BIT_SET_P(out_vmask)) ? 1 : 2);
- if (WITH_TRACE && ppc_trace[trace_model])
- model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
- # Schedule an MFVSCR instruction that VSCR input register and produces an AltiVec output register
- void::model-function::ppc_insn_from_vscr:itable_index index, model_data *model_ptr, const uint32_t vr_mask
- model_busy *busy_ptr;
- /* Stall until the destination VR and the VSCR are free.  */
- while ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
- if (WITH_TRACE && ppc_trace[trace_model])
- model_trace_altivec_busy_p(model_ptr, vr_mask);
- model_ptr->nr_stalls_data++;
- model_new_cycle(model_ptr);
- }
- busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
- /* NOTE(review): these two lines OR a *vector register* mask into
-    cr_fpscr_busy; presumably vr_busy (and busy_ptr->vr_busy) was
-    intended, as in the other ppc_insn_* helpers -- confirm.  */
- model_ptr->cr_fpscr_busy |= vr_mask;
- busy_ptr->cr_fpscr_busy |= vr_mask;
- if (vr_mask)
- busy_ptr->nr_writebacks = 1;
- model_ptr->vr_busy |= vr_mask;
- if (WITH_TRACE && ppc_trace[trace_model])
- model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
- # Schedule an MTVSCR instruction that one AltiVec input register and produces a vscr output register
- void::model-function::ppc_insn_to_vscr:itable_index index, model_data *model_ptr, const uint32_t vr_mask
- model_busy *busy_ptr;
- /* Stall until the source VR and the VSCR are free.  */
- while ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
- if (WITH_TRACE && ppc_trace[trace_model])
- model_trace_altivec_busy_p(model_ptr, vr_mask);
- model_ptr->nr_stalls_data++;
- model_new_cycle(model_ptr);
- }
- busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
- busy_ptr ->vscr_busy = 1;
- model_ptr->vscr_busy = 1;
- busy_ptr->nr_writebacks = 1;
- TRACE(trace_model,("Making VSCR busy.\n"));
- # The following are AltiVec saturate operations
- # Clamp a 16-bit sum to the int8_t range; *sat is 1 if clamping occurred.
- int8_t::model-function::altivec_signed_saturate_8:int16_t val, int *sat
- int8_t rv;
- if (val > 127) {
- rv = 127;
- *sat = 1;
- } else if (val < -128) {
- rv = -128;
- *sat = 1;
- } else {
- rv = val;
- *sat = 0;
- }
- return rv;
- # Clamp a 32-bit sum to the int16_t range; *sat is 1 if clamping occurred.
- int16_t::model-function::altivec_signed_saturate_16:int32_t val, int *sat
- int16_t rv;
- if (val > 32767) {
- rv = 32767;
- *sat = 1;
- } else if (val < -32768) {
- rv = -32768;
- *sat = 1;
- } else {
- rv = val;
- *sat = 0;
- }
- return rv;
- # Clamp a 64-bit sum to the int32_t range; *sat is 1 if clamping occurred.
- int32_t::model-function::altivec_signed_saturate_32:int64_t val, int *sat
- int32_t rv;
- if (val > 2147483647) {
- rv = 2147483647;
- *sat = 1;
- } else if (val < -2147483648LL) {
- rv = -2147483648LL;
- *sat = 1;
- } else {
- rv = val;
- *sat = 0;
- }
- return rv;
- # Clamp a signed 16-bit sum to [0,255]; *sat is 1 if clamping occurred.
- uint8_t::model-function::altivec_unsigned_saturate_8:int16_t val, int *sat
- uint8_t rv;
- if (val > 255) {
- rv = 255;
- *sat = 1;
- } else if (val < 0) {
- rv = 0;
- *sat = 1;
- } else {
- rv = val;
- *sat = 0;
- }
- return rv;
- # Clamp a signed 32-bit sum to [0,65535]; *sat is 1 if clamping occurred.
- uint16_t::model-function::altivec_unsigned_saturate_16:int32_t val, int *sat
- uint16_t rv;
- if (val > 65535) {
- rv = 65535;
- *sat = 1;
- } else if (val < 0) {
- rv = 0;
- *sat = 1;
- } else {
- rv = val;
- *sat = 0;
- }
- return rv;
- # Clamp a signed 64-bit sum to [0,2^32-1]; *sat is 1 if clamping occurred.
- uint32_t::model-function::altivec_unsigned_saturate_32:int64_t val, int *sat
- uint32_t rv;
- if (val > 4294967295LL) {
- rv = 4294967295LL;
- *sat = 1;
- } else if (val < 0) {
- rv = 0;
- *sat = 1;
- } else {
- rv = val;
- *sat = 0;
- }
- return rv;
- #
- # Load instructions, 6-14 ... 6-22.
- #
- # lvebx: EA = (rA|0)+rB; load one byte into byte element EA & 0xF.
- 0.31,6.VS,11.RA,16.RB,21.7,31.0:X:av:lvebx %VD, %RA, %RB:Load Vector Element Byte Indexed
- unsigned_word b;
- unsigned_word EA;
- unsigned_word eb;
- if (RA_is_0) b = 0;
- else b = *rA;
- EA = b + *rB;
- eb = EA & 0xf;
- (*vS).b[AV_BINDEX(eb)] = MEM(unsigned, EA, 1);
- PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
- # lvehx: EA aligned to 2; load a halfword into element (EA & 0xF)/2.
- 0.31,6.VS,11.RA,16.RB,21.39,31.0:X:av:lvehx %VD, %RA, %RB:Load Vector Element Half Word Indexed
- unsigned_word b;
- unsigned_word EA;
- unsigned_word eb;
- if (RA_is_0) b = 0;
- else b = *rA;
- EA = (b + *rB) & ~1;
- eb = EA & 0xf;
- (*vS).h[AV_HINDEX(eb/2)] = MEM(unsigned, EA, 2);
- PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
- # lvewx: EA aligned to 4; load a word into element (EA & 0xF)/4.
- 0.31,6.VS,11.RA,16.RB,21.71,31.0:X:av:lvewx %VD, %RA, %RB:Load Vector Element Word Indexed
- unsigned_word b;
- unsigned_word EA;
- unsigned_word eb;
- if (RA_is_0) b = 0;
- else b = *rA;
- EA = (b + *rB) & ~3;
- eb = EA & 0xf;
- /* NOTE(review): unlike lvebx/lvehx, no AV_*INDEX endian fixup is
-    applied to the word index here -- confirm little-endian behaviour.  */
- (*vS).w[eb/4] = MEM(unsigned, EA, 4);
- PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
- # lvsl: build the permute control vector for a left shift; bytes are
- # j, j+1, ... starting at j = EA & 0xF.
- 0.31,6.VS,11.RA,16.RB,21.6,31.0:X:av:lvsl %VD, %RA, %RB:Load Vector for Shift Left
- unsigned_word b;
- unsigned_word addr;
- int i, j;
- if (RA_is_0) b = 0;
- else b = *rA;
- addr = b + *rB;
- j = addr & 0xf;
- for (i = 0; i < 16; i++)
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG)
- (*vS).b[AV_BINDEX(i)] = j++;
- else
- (*vS).b[AV_BINDEX(15 - i)] = j++;
- PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
- # lvsr: permute control vector for a right shift; bytes start at
- # j = 0x10 - (EA & 0xF).
- 0.31,6.VS,11.RA,16.RB,21.38,31.0:X:av:lvsr %VD, %RA, %RB:Load Vector for Shift Right
- unsigned_word b;
- unsigned_word addr;
- int i, j;
- if (RA_is_0) b = 0;
- else b = *rA;
- addr = b + *rB;
- j = 0x10 - (addr & 0xf);
- for (i = 0; i < 16; i++)
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG)
- (*vS).b[AV_BINDEX(i)] = j++;
- else
- (*vS).b[AV_BINDEX(15 - i)] = j++;
- PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
- # lvx: load the 16-byte-aligned quadword at EA, word by word, honouring
- # target byte order.
- 0.31,6.VS,11.RA,16.RB,21.103,31.0:X:av:lvx %VD, %RA, %RB:Load Vector Indexed
- unsigned_word b;
- unsigned_word EA;
- if (RA_is_0) b = 0;
- else b = *rA;
- EA = (b + *rB) & ~0xf;
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG) {
- (*vS).w[0] = MEM(unsigned, EA + 0, 4);
- (*vS).w[1] = MEM(unsigned, EA + 4, 4);
- (*vS).w[2] = MEM(unsigned, EA + 8, 4);
- (*vS).w[3] = MEM(unsigned, EA + 12, 4);
- } else {
- (*vS).w[0] = MEM(unsigned, EA + 12, 4);
- (*vS).w[1] = MEM(unsigned, EA + 8, 4);
- (*vS).w[2] = MEM(unsigned, EA + 4, 4);
- (*vS).w[3] = MEM(unsigned, EA + 0, 4);
- }
- PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
- # lvxl: same as lvx; the LRU (transient) cache hint is not modelled.
- 0.31,6.VS,11.RA,16.RB,21.359,31.0:X:av:lvxl %VD, %RA, %RB:Load Vector Indexed LRU
- unsigned_word b;
- unsigned_word EA;
- if (RA_is_0) b = 0;
- else b = *rA;
- EA = (b + *rB) & ~0xf;
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG) {
- (*vS).w[0] = MEM(unsigned, EA + 0, 4);
- (*vS).w[1] = MEM(unsigned, EA + 4, 4);
- (*vS).w[2] = MEM(unsigned, EA + 8, 4);
- (*vS).w[3] = MEM(unsigned, EA + 12, 4);
- } else {
- (*vS).w[0] = MEM(unsigned, EA + 12, 4);
- (*vS).w[1] = MEM(unsigned, EA + 8, 4);
- (*vS).w[2] = MEM(unsigned, EA + 4, 4);
- (*vS).w[3] = MEM(unsigned, EA + 0, 4);
- }
- PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
- #
- # Move to/from VSCR instructions, 6-23 & 6-24.
- #
- # mfvscr: clear the upper 96 bits of vD and copy VSCR into the low word.
- 0.4,6.VS,11.0,16.0,21.1540:VX:av:mfvscr %VS:Move from Vector Status and Control Register
- (*vS).w[0] = 0;
- (*vS).w[1] = 0;
- (*vS).w[2] = 0;
- (*vS).w[3] = VSCR;
- PPC_INSN_FROM_VSCR(VS_BITMASK);
- # mtvscr: copy the low word of vB into the VSCR.
- 0.4,6.0,11.0,16.VB,21.1604:VX:av:mtvscr %VB:Move to Vector Status and Control Register
- VSCR = (*vB).w[3];
- PPC_INSN_TO_VSCR(VB_BITMASK);
- #
- # Store instructions, 6-25 ... 6-29.
- #
- # stvebx: EA = (rA|0)+rB; store byte element EA & 0xF of vS, with an
- # explicit index mirror on little-endian targets.
- 0.31,6.VS,11.RA,16.RB,21.135,31.0:X:av:stvebx %VD, %RA, %RB:Store Vector Element Byte Indexed
- unsigned_word b;
- unsigned_word EA;
- unsigned_word eb;
- if (RA_is_0) b = 0;
- else b = *rA;
- EA = b + *rB;
- eb = EA & 0xf;
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG)
- STORE(EA, 1, (*vS).b[eb]);
- else
- STORE(EA, 1, (*vS).b[15-eb]);
- PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
- # stvehx: EA = ((rA|0)+rB) & ~1; store halfword element (EA & 0xF)/2.
- 0.31,6.VS,11.RA,16.RB,21.167,31.0:X:av:stvehx %VD, %RA, %RB:Store Vector Element Half Word Indexed
- unsigned_word b;
- unsigned_word EA;
- unsigned_word eb;
- if (RA_is_0) b = 0;
- else b = *rA;
- EA = (b + *rB) & ~1;
- eb = EA & 0xf;
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG)
- STORE(EA, 2, (*vS).h[eb/2]);
- else
- /* Mirror the halfword index (0..7).  The original "7-eb" used the
-    byte offset, indexing out of bounds (negative) for eb > 7.  */
- STORE(EA, 2, (*vS).h[7 - (eb/2)]);
- PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
- # stvewx: EA = ((rA|0)+rB) & ~3; store word element (EA & 0xF)/4, with an
- # index mirror on little-endian targets.
- 0.31,6.VS,11.RA,16.RB,21.199,31.0:X:av:stvewx %VD, %RA, %RB:Store Vector Element Word Indexed
- unsigned_word b;
- unsigned_word EA;
- unsigned_word eb;
- if (RA_is_0) b = 0;
- else b = *rA;
- EA = (b + *rB) & ~3;
- eb = EA & 0xf;
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG)
- STORE(EA, 4, (*vS).w[eb/4]);
- else
- STORE(EA, 4, (*vS).w[3-(eb/4)]);
- PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
- # stvx: store the whole vector to the 16-byte-aligned quadword at EA,
- # word by word, honouring target byte order.
- 0.31,6.VS,11.RA,16.RB,21.231,31.0:X:av:stvx %VD, %RA, %RB:Store Vector Indexed
- unsigned_word b;
- unsigned_word EA;
- if (RA_is_0) b = 0;
- else b = *rA;
- EA = (b + *rB) & ~0xf;
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG) {
- STORE(EA + 0, 4, (*vS).w[0]);
- STORE(EA + 4, 4, (*vS).w[1]);
- STORE(EA + 8, 4, (*vS).w[2]);
- STORE(EA + 12, 4, (*vS).w[3]);
- } else {
- STORE(EA + 12, 4, (*vS).w[0]);
- STORE(EA + 8, 4, (*vS).w[1]);
- STORE(EA + 4, 4, (*vS).w[2]);
- STORE(EA + 0, 4, (*vS).w[3]);
- }
- PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
- # stvxl: same as stvx; the LRU (transient) cache hint is not modelled.
- 0.31,6.VS,11.RA,16.RB,21.487,31.0:X:av:stvxl %VD, %RA, %RB:Store Vector Indexed LRU
- unsigned_word b;
- unsigned_word EA;
- if (RA_is_0) b = 0;
- else b = *rA;
- EA = (b + *rB) & ~0xf;
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG) {
- STORE(EA + 0, 4, (*vS).w[0]);
- STORE(EA + 4, 4, (*vS).w[1]);
- STORE(EA + 8, 4, (*vS).w[2]);
- STORE(EA + 12, 4, (*vS).w[3]);
- } else {
- STORE(EA + 12, 4, (*vS).w[0]);
- STORE(EA + 8, 4, (*vS).w[1]);
- STORE(EA + 4, 4, (*vS).w[2]);
- STORE(EA + 0, 4, (*vS).w[3]);
- }
- PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
- #
- # Vector Add instructions, 6-30 ... 6-40.
- #
- # vaddcuw: write the carry-out (bit 32) of each unsigned 32-bit add.
- 0.4,6.VS,11.VA,16.VB,21.384:VX:av:vaddcuw %VD, %VA, %VB:Vector Add Carryout Unsigned Word
- uint64_t temp;
- int i;
- for (i = 0; i < 4; i++) {
- temp = (uint64_t)(*vA).w[i] + (uint64_t)(*vB).w[i];
- (*vS).w[i] = temp >> 32;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vaddfp: element-wise IEEE single-precision add via sim_fpu.
- 0.4,6.VS,11.VA,16.VB,21.10:VX:av:vaddfp %VD, %VA, %VB:Vector Add Floating Point
- int i;
- uint32_t f;
- sim_fpu a, b, d;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&a, (*vA).w[i]);
- sim_fpu_32to (&b, (*vB).w[i]);
- sim_fpu_add (&d, &a, &b);
- sim_fpu_to32 (&f, &d);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 
- # vaddsbs: saturating signed byte add; updates the VSCR SAT sticky bit.
- 0.4,6.VS,11.VA,16.VB,21.768:VX:av:vaddsbs %VD, %VA, %VB:Vector Add Signed Byte Saturate
- int i, sat, tempsat;
- int16_t temp;
- sat = 0; /* was read uninitialized by the |= below (UB) */
- for (i = 0; i < 16; i++) {
- temp = (int16_t)(int8_t)(*vA).b[i] + (int16_t)(int8_t)(*vB).b[i];
- (*vS).b[i] = altivec_signed_saturate_8(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vaddshs: saturating signed halfword add; updates the VSCR SAT bit.
- 0.4,6.VS,11.VA,16.VB,21.832:VX:av:vaddshs %VD, %VA, %VB:Vector Add Signed Half Word Saturate
- int i, sat, tempsat;
- int32_t temp, a, b;
- sat = 0; /* was read uninitialized by the |= below (UB) */
- for (i = 0; i < 8; i++) {
- a = (int32_t)(int16_t)(*vA).h[i];
- b = (int32_t)(int16_t)(*vB).h[i];
- temp = a + b;
- (*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vaddsws: saturating signed word add; updates the VSCR SAT bit.
- 0.4,6.VS,11.VA,16.VB,21.896:VX:av:vaddsws %VD, %VA, %VB:Vector Add Signed Word Saturate
- int i, sat, tempsat;
- int64_t temp;
- sat = 0; /* was read uninitialized by the |= below (UB) */
- for (i = 0; i < 4; i++) {
- temp = (int64_t)(int32_t)(*vA).w[i] + (int64_t)(int32_t)(*vB).w[i];
- (*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vaddubm: modulo (wrapping) unsigned byte add.
- 0.4,6.VS,11.VA,16.VB,21.0:VX:av:vaddubm %VD, %VA, %VB:Vector Add Unsigned Byte Modulo
- int i;
- for (i = 0; i < 16; i++)
- (*vS).b[i] = ((*vA).b[i] + (*vB).b[i]) & 0xff;
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vaddubs: saturating unsigned byte add; updates the VSCR SAT bit.
- 0.4,6.VS,11.VA,16.VB,21.512:VX:av:vaddubs %VD, %VA, %VB:Vector Add Unsigned Byte Saturate
- int i, sat, tempsat;
- int16_t temp;
- sat = 0;
- for (i = 0; i < 16; i++) {
- temp = (int16_t)(uint8_t)(*vA).b[i] + (int16_t)(uint8_t)(*vB).b[i];
- (*vS).b[i] = altivec_unsigned_saturate_8(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vadduhm: modulo (wrapping) unsigned halfword add.
- 0.4,6.VS,11.VA,16.VB,21.64:VX:av:vadduhm %VD, %VA, %VB:Vector Add Unsigned Half Word Modulo
- int i;
- for (i = 0; i < 8; i++)
- (*vS).h[i] = ((*vA).h[i] + (*vB).h[i]) & 0xffff;
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vadduhs: saturating unsigned halfword add; updates the VSCR SAT bit.
- 0.4,6.VS,11.VA,16.VB,21.576:VX:av:vadduhs %VD, %VA, %VB:Vector Add Unsigned Half Word Saturate
- int i, sat, tempsat;
- int32_t temp;
- sat = 0; /* was read uninitialized by the |= below (UB) */
- for (i = 0; i < 8; i++) {
- temp = (int32_t)(uint16_t)(*vA).h[i] + (int32_t)(uint16_t)(*vB).h[i];
- (*vS).h[i] = altivec_unsigned_saturate_16(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vadduwm: modulo (wrapping) unsigned word add.
- 0.4,6.VS,11.VA,16.VB,21.128:VX:av:vadduwm %VD, %VA, %VB:Vector Add Unsigned Word Modulo
- int i;
- for (i = 0; i < 4; i++)
- (*vS).w[i] = (*vA).w[i] + (*vB).w[i];
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vadduws: saturating unsigned word add; updates the VSCR SAT bit.
- 0.4,6.VS,11.VA,16.VB,21.640:VX:av:vadduws %VD, %VA, %VB:Vector Add Unsigned Word Saturate
- int i, sat, tempsat;
- int64_t temp;
- sat = 0; /* was read uninitialized by the |= below (UB) */
- for (i = 0; i < 4; i++) {
- temp = (int64_t)(uint32_t)(*vA).w[i] + (int64_t)(uint32_t)(*vB).w[i];
- (*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector AND instructions, 6-41, 6-42
- #
- # vand: vD = vA & vB, 32 bits at a time.
- 0.4,6.VS,11.VA,16.VB,21.1028:VX:av:vand %VD, %VA, %VB:Vector Logical AND
- int i;
- for (i = 0; i < 4; i++)
- (*vS).w[i] = (*vA).w[i] & (*vB).w[i];
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vandc: vD = vA & ~vB, 32 bits at a time.  (Fixed the misspelling
- # "Compliment" in the instruction's description text.)
- 0.4,6.VS,11.VA,16.VB,21.1092:VX:av:vandc %VD, %VA, %VB:Vector Logical AND with Complement
- int i;
- for (i = 0; i < 4; i++)
- (*vS).w[i] = (*vA).w[i] & ~((*vB).w[i]);
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Average instructions, 6-43, 6-48
- #
- # All averages compute (a + b + 1) >> 1 in a widened type so the
- # intermediate sum cannot overflow.
- # vavgsb: rounded average of signed bytes.
- 0.4,6.VS,11.VA,16.VB,21.1282:VX:av:vavgsb %VD, %VA, %VB:Vector Average Signed Byte
- int i;
- int16_t temp, a, b;
- for (i = 0; i < 16; i++) {
- a = (int16_t)(int8_t)(*vA).b[i];
- b = (int16_t)(int8_t)(*vB).b[i];
- temp = a + b + 1;
- (*vS).b[i] = (temp >> 1) & 0xff;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vavgsh: rounded average of signed halfwords.
- 0.4,6.VS,11.VA,16.VB,21.1346:VX:av:vavgsh %VD, %VA, %VB:Vector Average Signed Half Word
- int i;
- int32_t temp, a, b;
- for (i = 0; i < 8; i++) {
- a = (int32_t)(int16_t)(*vA).h[i];
- b = (int32_t)(int16_t)(*vB).h[i];
- temp = a + b + 1;
- (*vS).h[i] = (temp >> 1) & 0xffff;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vavgsw: rounded average of signed words.
- 0.4,6.VS,11.VA,16.VB,21.1410:VX:av:vavgsw %VD, %VA, %VB:Vector Average Signed Word
- int i;
- int64_t temp, a, b;
- for (i = 0; i < 4; i++) {
- a = (int64_t)(int32_t)(*vA).w[i];
- b = (int64_t)(int32_t)(*vB).w[i];
- temp = a + b + 1;
- (*vS).w[i] = (temp >> 1) & 0xffffffff;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vavgub: rounded average of unsigned bytes.
- 0.4,6.VS,11.VA,16.VB,21.1026:VX:av:vavgub %VD, %VA, %VB:Vector Average Unsigned Byte
- int i;
- uint16_t temp, a, b;
- for (i = 0; i < 16; i++) {
- a = (*vA).b[i];
- b = (*vB).b[i];
- temp = a + b + 1;
- (*vS).b[i] = (temp >> 1) & 0xff;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vavguh: rounded average of unsigned halfwords.
- 0.4,6.VS,11.VA,16.VB,21.1090:VX:av:vavguh %VD, %VA, %VB:Vector Average Unsigned Half Word
- int i;
- uint32_t temp, a, b;
- for (i = 0; i < 8; i++) {
- a = (*vA).h[i];
- b = (*vB).h[i];
- temp = a + b + 1;
- (*vS).h[i] = (temp >> 1) & 0xffff;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- # vavguw: rounded average of unsigned words.
- 0.4,6.VS,11.VA,16.VB,21.1154:VX:av:vavguw %VD, %VA, %VB:Vector Average Unsigned Word
- int i;
- uint64_t temp, a, b;
- for (i = 0; i < 4; i++) {
- a = (*vA).w[i];
- b = (*vB).w[i];
- temp = a + b + 1;
- (*vS).w[i] = (temp >> 1) & 0xffffffff;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Fixed Point Convert instructions, 6-49, 6-50
- #
- # vcfsx: convert each signed fixed-point word to float and divide by
- # 2^UIMM.
- 0.4,6.VS,11.UIMM,16.VB,21.842:VX:av:vcfsx %VD, %VB, %UIMM:Vector Convert From Signed Fixed-Point Word
- int i;
- uint32_t f;
- sim_fpu b, div, d;
- for (i = 0; i < 4; i++) {
- /* NOTE(review): sim_fpu_32to reinterprets the element as IEEE float
-    bits; an integer conversion (sim_fpu_i32to) looks intended for a
-    fixed-point source -- confirm before changing.  */
- sim_fpu_32to (&b, (*vB).w[i]);
- /* The ISA divides by 2^UIMM; the original "2 << UIMM" is 2^(UIMM+1),
-    doubling the divisor.  Unsigned shift keeps UIMM == 31 defined.  */
- sim_fpu_u32to (&div, (uint32_t)1 << UIMM, sim_fpu_round_default);
- sim_fpu_div (&d, &b, &div);
- sim_fpu_to32 (&f, &d);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.UIMM,16.VB,21.778:VX:av:vcfux %VD, %VB, %UIMM:Vector Convert From Unsigned Fixed-Point Word
- /* vS.w[i] = float(unsigned vB.w[i]) / 2**UIMM.
-    Fixes: asm template read "%VA" although the source register is vB;
-    the source is converted by value with sim_fpu_u32to (not bit-cast);
-    the divisor is 2**UIMM, not 2 << UIMM; and the float result is stored
-    as its bit pattern with sim_fpu_to32 (sim_fpu_to32u truncated it to an
-    unsigned integer). */
- int i;
- uint32_t f;
- sim_fpu b, d, div;
- for (i = 0; i < 4; i++) {
- sim_fpu_u32to (&b, (*vB).w[i], sim_fpu_round_default);
- sim_fpu_u32to (&div, (uint32_t)1 << UIMM, sim_fpu_round_default);
- sim_fpu_div (&d, &b, &div);
- sim_fpu_to32 (&f, &d);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- #
- # Vector Compare instructions, 6-51 ... 6-64
- #
- 0.4,6.VS,11.VA,16.VB,21.RC,22.966:VXR:av:vcmpbfpx %VD, %VA, %VB:Vector Compare Bounds Floating Point
- /* Mnemonic fixed from "vcmpbpfpx": opcode 966 is vcmpbfp[.].
-    1U avoids the undefined behavior of left-shifting signed 1 by 31. */
- int i, le, ge;
- sim_fpu a, b, d;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&a, (*vA).w[i]);
- sim_fpu_32to (&b, (*vB).w[i]);
- le = sim_fpu_is_le(&a, &b);
- ge = sim_fpu_is_ge(&a, &b);
- (*vS).w[i] = (le ? 0 : 1U << 31) | (ge ? 0 : 1U << 30);
- }
- if (RC)
- ALTIVEC_SET_CR6(vS, 0);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- 0.4,6.VS,11.VA,16.VB,21.RC,22.198:VXR:av:vcmpeqfpx %VD, %VA, %VB:Vector Compare Equal-to-Floating Point
- /* Element is all-ones when the FP compare is true, zero otherwise;
-    RC set folds the result into CR6. */
- int i;
- sim_fpu a, b;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&a, (*vA).w[i]);
- sim_fpu_32to (&b, (*vB).w[i]);
- if (sim_fpu_is_eq(&a, &b))
- (*vS).w[i] = 0xffffffff;
- else
- (*vS).w[i] = 0;
- }
- if (RC)
- ALTIVEC_SET_CR6(vS, 1);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- 0.4,6.VS,11.VA,16.VB,21.RC,22.6:VXR:av:vcmpequbx %VD, %VA, %VB:Vector Compare Equal-to Unsigned Byte
- /* Per-byte equality mask; RC updates CR6. */
- int i;
- for (i = 0; i < 16; i++)
- if ((*vA).b[i] == (*vB).b[i])
- (*vS).b[i] = 0xff;
- else
- (*vS).b[i] = 0;
- if (RC)
- ALTIVEC_SET_CR6(vS, 1);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- 0.4,6.VS,11.VA,16.VB,21.RC,22.70:VXR:av:vcmpequhx %VD, %VA, %VB:Vector Compare Equal-to Unsigned Half Word
- /* Per-half-word equality mask; RC updates CR6. */
- int i;
- for (i = 0; i < 8; i++)
- if ((*vA).h[i] == (*vB).h[i])
- (*vS).h[i] = 0xffff;
- else
- (*vS).h[i] = 0;
- if (RC)
- ALTIVEC_SET_CR6(vS, 1);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- 0.4,6.VS,11.VA,16.VB,21.RC,22.134:VXR:av:vcmpequwx %VD, %VA, %VB:Vector Compare Equal-to Unsigned Word
- /* Per-word equality mask; RC updates CR6. */
- int i;
- for (i = 0; i < 4; i++)
- if ((*vA).w[i] == (*vB).w[i])
- (*vS).w[i] = 0xffffffff;
- else
- (*vS).w[i] = 0;
- if (RC)
- ALTIVEC_SET_CR6(vS, 1);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- 0.4,6.VS,11.VA,16.VB,21.RC,22.454:VXR:av:vcmpgefpx %VD, %VA, %VB:Vector Compare Greater-Than-or-Equal-to Floating Point
- /* Per-word FP >= mask; RC updates CR6. */
- int i;
- sim_fpu a, b;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&a, (*vA).w[i]);
- sim_fpu_32to (&b, (*vB).w[i]);
- if (sim_fpu_is_ge(&a, &b))
- (*vS).w[i] = 0xffffffff;
- else
- (*vS).w[i] = 0;
- }
- if (RC)
- ALTIVEC_SET_CR6(vS, 1);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- 0.4,6.VS,11.VA,16.VB,21.RC,22.710:VXR:av:vcmpgtfpx %VD, %VA, %VB:Vector Compare Greater-Than Floating Point
- /* Per-word FP > mask; RC updates CR6. */
- int i;
- sim_fpu a, b;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&a, (*vA).w[i]);
- sim_fpu_32to (&b, (*vB).w[i]);
- if (sim_fpu_is_gt(&a, &b))
- (*vS).w[i] = 0xffffffff;
- else
- (*vS).w[i] = 0;
- }
- if (RC)
- ALTIVEC_SET_CR6(vS, 1);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- 0.4,6.VS,11.VA,16.VB,21.RC,22.774:VXR:av:vcmpgtsbx %VD, %VA, %VB:Vector Compare Greater-Than Signed Byte
- /* Signed per-byte > mask; RC updates CR6. */
- int i;
- int8_t a, b;
- for (i = 0; i < 16; i++) {
- a = (*vA).b[i];
- b = (*vB).b[i];
- if (a > b)
- (*vS).b[i] = 0xff;
- else
- (*vS).b[i] = 0;
- }
- if (RC)
- ALTIVEC_SET_CR6(vS, 1);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- 0.4,6.VS,11.VA,16.VB,21.RC,22.838:VXR:av:vcmpgtshx %VD, %VA, %VB:Vector Compare Greater-Than Signed Half Word
- /* Signed per-half-word > mask; RC updates CR6. */
- int i;
- int16_t a, b;
- for (i = 0; i < 8; i++) {
- a = (*vA).h[i];
- b = (*vB).h[i];
- if (a > b)
- (*vS).h[i] = 0xffff;
- else
- (*vS).h[i] = 0;
- }
- if (RC)
- ALTIVEC_SET_CR6(vS, 1);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- 0.4,6.VS,11.VA,16.VB,21.RC,22.902:VXR:av:vcmpgtswx %VD, %VA, %VB:Vector Compare Greater-Than Signed Word
- /* Signed per-word > mask; RC updates CR6. */
- int i;
- int32_t a, b;
- for (i = 0; i < 4; i++) {
- a = (*vA).w[i];
- b = (*vB).w[i];
- if (a > b)
- (*vS).w[i] = 0xffffffff;
- else
- (*vS).w[i] = 0;
- }
- if (RC)
- ALTIVEC_SET_CR6(vS, 1);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- 0.4,6.VS,11.VA,16.VB,21.RC,22.518:VXR:av:vcmpgtubx %VD, %VA, %VB:Vector Compare Greater-Than Unsigned Byte
- /* Unsigned per-byte > mask; RC updates CR6. */
- int i;
- uint8_t a, b;
- for (i = 0; i < 16; i++) {
- a = (*vA).b[i];
- b = (*vB).b[i];
- if (a > b)
- (*vS).b[i] = 0xff;
- else
- (*vS).b[i] = 0;
- }
- if (RC)
- ALTIVEC_SET_CR6(vS, 1);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- 0.4,6.VS,11.VA,16.VB,21.RC,22.582:VXR:av:vcmpgtuhx %VD, %VA, %VB:Vector Compare Greater-Than Unsigned Half Word
- /* Unsigned per-half-word > mask; RC updates CR6. */
- int i;
- uint16_t a, b;
- for (i = 0; i < 8; i++) {
- a = (*vA).h[i];
- b = (*vB).h[i];
- if (a > b)
- (*vS).h[i] = 0xffff;
- else
- (*vS).h[i] = 0;
- }
- if (RC)
- ALTIVEC_SET_CR6(vS, 1);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- 0.4,6.VS,11.VA,16.VB,21.RC,22.646:VXR:av:vcmpgtuwx %VD, %VA, %VB:Vector Compare Greater-Than Unsigned Word
- /* Unsigned per-word > mask; RC updates CR6. */
- int i;
- uint32_t a, b;
- for (i = 0; i < 4; i++) {
- a = (*vA).w[i];
- b = (*vB).w[i];
- if (a > b)
- (*vS).w[i] = 0xffffffff;
- else
- (*vS).w[i] = 0;
- }
- if (RC)
- ALTIVEC_SET_CR6(vS, 1);
- PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
- #
- # Vector Convert instructions, 6-65, 6-66.
- #
- 0.4,6.VS,11.UIMM,16.VB,21.970:VX:av:vctsxs %VD, %VB, %UIMM:Vector Convert to Signed Fixed-Point Word Saturate
- /* vS.w[i] = saturate32(vB.w[i] * 2**UIMM) as a signed integer; saturation
-    is folded into VSCR[SAT].  Scale is 2**UIMM — "2 << UIMM" doubled the
-    scale and overflowed for UIMM == 31. */
- int i, sat, tempsat;
- int64_t temp;
- sim_fpu a, b, m;
- sat = 0;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&b, (*vB).w[i]);
- sim_fpu_u32to (&m, (uint32_t)1 << UIMM, sim_fpu_round_default);
- sim_fpu_mul (&a, &b, &m);
- sim_fpu_to64i (&temp, &a, sim_fpu_round_default);
- (*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.UIMM,16.VB,21.906:VX:av:vctuxs %VD, %VB, %UIMM:Vector Convert to Unsigned Fixed-Point Word Saturate
- /* vS.w[i] = saturate32u(vB.w[i] * 2**UIMM); saturation folded into
-    VSCR[SAT].  Scale is 2**UIMM, not 2 << UIMM (see vctsxs). */
- int i, sat, tempsat;
- int64_t temp;
- sim_fpu a, b, m;
- sat = 0;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&b, (*vB).w[i]);
- sim_fpu_u32to (&m, (uint32_t)1 << UIMM, sim_fpu_round_default);
- sim_fpu_mul (&a, &b, &m);
- sim_fpu_to64u (&temp, &a, sim_fpu_round_default);
- (*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VB_BITMASK);
- #
- # Vector Estimate instructions, 6-67 ... 6-70.
- #
- 0.4,6.VS,11.0,16.VB,21.394:VX:av:vexptefp %VD, %VB:Vector 2 Raised to the Exponent Estimate Floating Point
- int i;
- uint32_t f;
- int32_t bi;
- sim_fpu b, d;
- for (i = 0; i < 4; i++) {
- /*HACK!*/
- sim_fpu_32to (&b, (*vB).w[i]);
- sim_fpu_to32i (&bi, &b, sim_fpu_round_default);
- /* 2**bi for integral exponents: "2 ^ bi" was bitwise XOR, not a power.
-    Out-of-range exponents degrade to 0 — this is only an estimate hack. */
- bi = (bi < 0 || bi > 30) ? 0 : (1 << bi);
- /* convert the integer value (sim_fpu_32to would reinterpret its bits) */
- sim_fpu_i32to (&d, bi, sim_fpu_round_default);
- sim_fpu_to32 (&f, &d);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR_VSCR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.0,16.VB,21.458:VX:av:vlogefp %VD, %VB:Vector Log2 Estimate Floating Point
- /* HACK: crude log2 estimate.  The loop counts halvings of the truncated
-    integer value, then the count is *added to the original operand* —
-    NOTE(review): that sum does not approximate log2(b); verify against a
-    real estimate before relying on the numeric result. */
- int i;
- uint32_t c, u, f;
- sim_fpu b, cfpu, d;
- for (i = 0; i < 4; i++) {
- /*HACK!*/
- sim_fpu_32to (&b, (*vB).w[i]);
- sim_fpu_to32u (&u, &b, sim_fpu_round_default);
- for (c = 0; (u /= 2) > 1; c++)
- ;
- sim_fpu_32to (&cfpu, c);
- sim_fpu_add (&d, &b, &cfpu);
- sim_fpu_to32 (&f, &d);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR_VSCR(VS_BITMASK, VB_BITMASK);
- #
- # Vector Multiply Add instruction, 6-71
- #
- 0.4,6.VS,11.VA,16.VB,21.VC,26.46:VAX:av:vmaddfp %VD, %VA, %VB, %VC:Vector Multiply Add Floating Point
- /* vS.w[i] = (vA.w[i] * vC.w[i]) + vB.w[i], per single-precision word. */
- int i;
- uint32_t f;
- sim_fpu a, b, c, d, e;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&a, (*vA).w[i]);
- sim_fpu_32to (&b, (*vB).w[i]);
- sim_fpu_32to (&c, (*vC).w[i]);
- sim_fpu_mul (&e, &a, &c);
- sim_fpu_add (&d, &e, &b);
- sim_fpu_to32 (&f, &d);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- #
- # Vector Maximum instructions, 6-72 ... 6-78.
- #
- 0.4,6.VS,11.VA,16.VB,21.1034:VX:av:vmaxfp %VD, %VA, %VB:Vector Maximum Floating Point
- /* Per-word FP maximum via sim_fpu_max. */
- int i;
- uint32_t f;
- sim_fpu a, b, d;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&a, (*vA).w[i]);
- sim_fpu_32to (&b, (*vB).w[i]);
- sim_fpu_max (&d, &a, &b);
- sim_fpu_to32 (&f, &d);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.258:VX:av:vmaxsb %VD, %VA, %VB:Vector Maximum Signed Byte
- /* Element-wise signed byte maximum. */
- int i;
- int8_t a, b;
- for (i = 0; i < 16; i++) {
- a = (*vA).b[i];
- b = (*vB).b[i];
- if (a > b)
- (*vS).b[i] = a;
- else
- (*vS).b[i] = b;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.322:VX:av:vmaxsh %VD, %VA, %VB:Vector Maximum Signed Half Word
- /* Element-wise signed half-word maximum. */
- int i;
- int16_t a, b;
- for (i = 0; i < 8; i++) {
- a = (*vA).h[i];
- b = (*vB).h[i];
- if (a > b)
- (*vS).h[i] = a;
- else
- (*vS).h[i] = b;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.386:VX:av:vmaxsw %VD, %VA, %VB:Vector Maximum Signed Word
- /* Element-wise signed word maximum. */
- int i;
- int32_t a, b;
- for (i = 0; i < 4; i++) {
- a = (*vA).w[i];
- b = (*vB).w[i];
- if (a > b)
- (*vS).w[i] = a;
- else
- (*vS).w[i] = b;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.2:VX:av:vmaxub %VD, %VA, %VB:Vector Maximum Unsigned Byte
- /* Element-wise unsigned byte maximum. */
- int i;
- uint8_t a, b;
- for (i = 0; i < 16; i++) {
- a = (*vA).b[i];
- b = (*vB).b[i];
- if (a > b)
- (*vS).b[i] = a;
- else
- (*vS).b[i] = b;
- };
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.66:VX:av:vmaxuh %VD, %VA, %VB:Vector Maximum Unsigned Half Word
- /* Element-wise unsigned half-word maximum.  Mnemonic fixed from "vmaxus":
-    the architected name for VX opcode 66 is vmaxuh. */
- int i;
- uint16_t a, b;
- for (i = 0; i < 8; i++) {
- a = (*vA).h[i];
- b = (*vB).h[i];
- if (a > b)
- (*vS).h[i] = a;
- else
- (*vS).h[i] = b;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.130:VX:av:vmaxuw %VD, %VA, %VB:Vector Maximum Unsigned Word
- /* Element-wise unsigned word maximum. */
- int i;
- uint32_t a, b;
- for (i = 0; i < 4; i++) {
- a = (*vA).w[i];
- b = (*vB).w[i];
- if (a > b)
- (*vS).w[i] = a;
- else
- (*vS).w[i] = b;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Multiple High instructions, 6-79, 6-80.
- #
- 0.4,6.VS,11.VA,16.VB,21.VC,26.32:VAX:av:vmhaddshs %VD, %VA, %VB, %VC:Vector Multiple High and Add Signed Half Word Saturate
- /* vS.h[i] = saturate16(((vA.h[i] * vB.h[i]) >> 15) + vC.h[i]);
-    saturation is folded into VSCR[SAT]. */
- int i, sat, tempsat;
- int16_t a, b;
- int32_t prod, temp, c;
- sat = 0; /* was read uninitialized by "sat |= tempsat" below */
- for (i = 0; i < 8; i++) {
- a = (*vA).h[i];
- b = (*vB).h[i];
- c = (int32_t)(int16_t)(*vC).h[i];
- prod = (int32_t)a * (int32_t)b;
- temp = (prod >> 15) + c;
- (*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.VC,26.33:VAX:av:vmhraddshs %VD, %VA, %VB, %VC:Vector Multiple High Round and Add Signed Half Word Saturate
- /* Like vmhaddshs but the product is rounded by adding 0x4000 before the
-    15-bit shift; saturation is folded into VSCR[SAT]. */
- int i, sat, tempsat;
- int16_t a, b;
- int32_t prod, temp, c;
- sat = 0; /* was read uninitialized by "sat |= tempsat" below */
- for (i = 0; i < 8; i++) {
- a = (*vA).h[i];
- b = (*vB).h[i];
- c = (int32_t)(int16_t)(*vC).h[i];
- prod = (int32_t)a * (int32_t)b;
- prod += 0x4000;
- temp = (prod >> 15) + c;
- (*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- #
- # Vector Minimum instructions, 6-81 ... 6-87
- #
- 0.4,6.VS,11.VA,16.VB,21.1098:VX:av:vminfp %VD, %VA, %VB:Vector Minimum Floating Point
- /* Per-word FP minimum via sim_fpu_min. */
- int i;
- uint32_t f;
- sim_fpu a, b, d;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&a, (*vA).w[i]);
- sim_fpu_32to (&b, (*vB).w[i]);
- sim_fpu_min (&d, &a, &b);
- sim_fpu_to32 (&f, &d);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.770:VX:av:vminsb %VD, %VA, %VB:Vector Minimum Signed Byte
- /* Element-wise signed byte minimum. */
- int i;
- int8_t a, b;
- for (i = 0; i < 16; i++) {
- a = (*vA).b[i];
- b = (*vB).b[i];
- if (a < b)
- (*vS).b[i] = a;
- else
- (*vS).b[i] = b;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.834:VX:av:vminsh %VD, %VA, %VB:Vector Minimum Signed Half Word
- /* Element-wise signed half-word minimum. */
- int i;
- int16_t a, b;
- for (i = 0; i < 8; i++) {
- a = (*vA).h[i];
- b = (*vB).h[i];
- if (a < b)
- (*vS).h[i] = a;
- else
- (*vS).h[i] = b;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.898:VX:av:vminsw %VD, %VA, %VB:Vector Minimum Signed Word
- /* Element-wise signed word minimum. */
- int i;
- int32_t a, b;
- for (i = 0; i < 4; i++) {
- a = (*vA).w[i];
- b = (*vB).w[i];
- if (a < b)
- (*vS).w[i] = a;
- else
- (*vS).w[i] = b;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.514:VX:av:vminub %VD, %VA, %VB:Vector Minimum Unsigned Byte
- /* Element-wise unsigned byte minimum. */
- int i;
- uint8_t a, b;
- for (i = 0; i < 16; i++) {
- a = (*vA).b[i];
- b = (*vB).b[i];
- if (a < b)
- (*vS).b[i] = a;
- else
- (*vS).b[i] = b;
- };
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.578:VX:av:vminuh %VD, %VA, %VB:Vector Minimum Unsigned Half Word
- /* Element-wise unsigned half-word minimum. */
- int i;
- uint16_t a, b;
- for (i = 0; i < 8; i++) {
- a = (*vA).h[i];
- b = (*vB).h[i];
- if (a < b)
- (*vS).h[i] = a;
- else
- (*vS).h[i] = b;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.642:VX:av:vminuw %VD, %VA, %VB:Vector Minimum Unsigned Word
- /* Element-wise unsigned word minimum. */
- int i;
- uint32_t a, b;
- for (i = 0; i < 4; i++) {
- a = (*vA).w[i];
- b = (*vB).w[i];
- if (a < b)
- (*vS).w[i] = a;
- else
- (*vS).w[i] = b;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Multiply Low instruction, 6-88
- #
- 0.4,6.VS,11.VA,16.VB,21.VC,26.34:VAX:av:vmladduhm %VD, %VA, %VB, %VC:Vector Multiply Low and Add Unsigned Half Word Modulo
- /* vS.h[i] = low 16 bits of (vA.h[i] * vB.h[i] + vC.h[i]); product widened
-    to 32 bits, result reduced modulo 2**16. */
- int i;
- uint16_t a, b, c;
- uint32_t prod;
- for (i = 0; i < 8; i++) {
- a = (*vA).h[i];
- b = (*vB).h[i];
- c = (*vC).h[i];
- prod = (uint32_t)a * (uint32_t)b;
- (*vS).h[i] = (prod + c) & 0xffff;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- #
- # Vector Merge instructions, 6-89 ... 6-94
- #
- 0.4,6.VS,11.VA,16.VB,21.12:VX:av:vmrghb %VD, %VA, %VB:Vector Merge High Byte
- /* Interleave the high 8 bytes of vA and vB: vS = a0,b0,a1,b1,...  AV_BINDEX
-    maps architectural (big-endian) element numbers to host storage. */
- int i;
- for (i = 0; i < 16; i += 2) {
- (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX(i/2)];
- (*vS).b[AV_BINDEX(i+1)] = (*vB).b[AV_BINDEX(i/2)];
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.76:VX:av:vmrghh %VD, %VA, %VB:Vector Merge High Half Word
- /* Interleave the high 4 half words of vA and vB. */
- int i;
- for (i = 0; i < 8; i += 2) {
- (*vS).h[AV_HINDEX(i)] = (*vA).h[AV_HINDEX(i/2)];
- (*vS).h[AV_HINDEX(i+1)] = (*vB).h[AV_HINDEX(i/2)];
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.140:VX:av:vmrghw %VD, %VA, %VB:Vector Merge High Word
- /* Interleave the high 2 words of vA and vB. */
- int i;
- for (i = 0; i < 4; i += 2) {
- (*vS).w[i] = (*vA).w[i/2];
- (*vS).w[i+1] = (*vB).w[i/2];
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.268:VX:av:vmrglb %VD, %VA, %VB:Vector Merge Low Byte
- /* Interleave the low 8 bytes of vA and vB (source offset +8). */
- int i;
- for (i = 0; i < 16; i += 2) {
- (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX((i/2) + 8)];
- (*vS).b[AV_BINDEX(i+1)] = (*vB).b[AV_BINDEX((i/2) + 8)];
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.332:VX:av:vmrglh %VD, %VA, %VB:Vector Merge Low Half Word
- /* Interleave the low 4 half words of vA and vB (source offset +4). */
- int i;
- for (i = 0; i < 8; i += 2) {
- (*vS).h[AV_HINDEX(i)] = (*vA).h[AV_HINDEX((i/2) + 4)];
- (*vS).h[AV_HINDEX(i+1)] = (*vB).h[AV_HINDEX((i/2) + 4)];
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.396:VX:av:vmrglw %VD, %VA, %VB:Vector Merge Low Word
- /* Interleave the low 2 words of vA and vB (source offset +2). */
- int i;
- for (i = 0; i < 4; i += 2) {
- (*vS).w[i] = (*vA).w[(i/2) + 2];
- (*vS).w[i+1] = (*vB).w[(i/2) + 2];
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Multiply Sum instructions, 6-95 ... 6-100
- #
- 0.4,6.VS,11.VA,16.VB,21.VC,26.37:VAX:av:vmsummbm %VD, %VA, %VB, %VC:Vector Multiply Sum Mixed-Sign Byte Modulo
- /* vS.w[i] = vC.w[i] + sum over j of (signed vA byte * unsigned vB byte);
-    each product fits in int16_t ([-128,127] * [0,255]). */
- int i, j;
- int32_t temp;
- int16_t prod, a;
- uint16_t b;
- for (i = 0; i < 4; i++) {
- temp = (*vC).w[i];
- for (j = 0; j < 4; j++) {
- a = (int16_t)(int8_t)(*vA).b[i*4+j];
- b = (*vB).b[i*4+j];
- prod = a * b;
- temp += (int32_t)prod;
- }
- (*vS).w[i] = temp;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.VC,26.40:VAX:av:vmsumshm %VD, %VA, %VB, %VC:Vector Multiply Sum Signed Half Word Modulo
- /* vS.w[i] = vC.w[i] + sum of the two signed half-word products, modulo 2**32. */
- int i, j;
- int32_t temp, prod, a, b;
- for (i = 0; i < 4; i++) {
- temp = (*vC).w[i];
- for (j = 0; j < 2; j++) {
- a = (int32_t)(int16_t)(*vA).h[i*2+j];
- b = (int32_t)(int16_t)(*vB).h[i*2+j];
- prod = a * b;
- temp += prod;
- }
- (*vS).w[i] = temp;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.VC,26.41:VAX:av:vmsumshs %VD, %VA, %VB, %VC:Vector Multiply Sum Signed Half Word Saturate
- /* As vmsumshm, but accumulated in 64 bits and saturated to int32;
-    saturation is folded into VSCR[SAT]. */
- int i, j, sat, tempsat;
- int64_t temp;
- int32_t prod, a, b;
- sat = 0;
- for (i = 0; i < 4; i++) {
- temp = (int64_t)(int32_t)(*vC).w[i];
- for (j = 0; j < 2; j++) {
- a = (int32_t)(int16_t)(*vA).h[i*2+j];
- b = (int32_t)(int16_t)(*vB).h[i*2+j];
- prod = a * b;
- temp += (int64_t)prod;
- }
- (*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.VC,26.36:VAX:av:vmsumubm %VD, %VA, %VB, %VC:Vector Multiply Sum Unsigned Byte Modulo
- /* vS.w[i] = vC.w[i] + sum of the four unsigned byte products, modulo 2**32. */
- int i, j;
- uint32_t temp;
- uint16_t prod, a, b;
- for (i = 0; i < 4; i++) {
- temp = (*vC).w[i];
- for (j = 0; j < 4; j++) {
- a = (*vA).b[i*4+j];
- b = (*vB).b[i*4+j];
- prod = a * b;
- temp += prod;
- }
- (*vS).w[i] = temp;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.VC,26.38:VAX:av:vmsumuhm %VD, %VA, %VB, %VC:Vector Multiply Sum Unsigned Half Word Modulo
- /* vS.w[i] = vC.w[i] + sum of the two unsigned half-word products, modulo 2**32. */
- int i, j;
- uint32_t temp, prod, a, b;
- for (i = 0; i < 4; i++) {
- temp = (*vC).w[i];
- for (j = 0; j < 2; j++) {
- a = (*vA).h[i*2+j];
- b = (*vB).h[i*2+j];
- prod = a * b;
- temp += prod;
- }
- (*vS).w[i] = temp;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.VC,26.39:VAX:av:vmsumuhs %VD, %VA, %VB, %VC:Vector Multiply Sum Unsigned Half Word Saturate
- /* As vmsumuhm but saturated to uint32; saturation folded into VSCR[SAT]. */
- int i, j, sat, tempsat;
- uint32_t temp, prod, a, b;
- sat = 0;
- for (i = 0; i < 4; i++) {
- temp = (*vC).w[i];
- for (j = 0; j < 2; j++) {
- a = (*vA).h[i*2+j];
- b = (*vB).h[i*2+j];
- prod = a * b;
- temp += prod;
- }
- (*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- #
- # Vector Multiply Even/Odd instructions, 6-101 ... 6-108
- #
- 0.4,6.VS,11.VA,16.VB,21.776:VX:av:vmulesb %VD, %VA, %VB:Vector Multiply Even Signed Byte
- /* Half-word i = product of the even-numbered (architectural order) signed
-    bytes of vA and vB. */
- int i;
- int8_t a, b;
- int16_t prod;
- for (i = 0; i < 8; i++) {
- a = (*vA).b[AV_BINDEX(i*2)];
- b = (*vB).b[AV_BINDEX(i*2)];
- prod = a * b;
- (*vS).h[AV_HINDEX(i)] = prod;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.840:VX:av:vmulesh %VD, %VA, %VB:Vector Multiply Even Signed Half Word
- /* Word i = product of the even-numbered signed half words. */
- int i;
- int16_t a, b;
- int32_t prod;
- for (i = 0; i < 4; i++) {
- a = (*vA).h[AV_HINDEX(i*2)];
- b = (*vB).h[AV_HINDEX(i*2)];
- prod = a * b;
- (*vS).w[i] = prod;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.520:VX:av:vmuleub %VD, %VA, %VB:Vector Multiply Even Unsigned Byte
- /* Half-word i = product of the even-numbered unsigned bytes. */
- int i;
- uint8_t a, b;
- uint16_t prod;
- for (i = 0; i < 8; i++) {
- a = (*vA).b[AV_BINDEX(i*2)];
- b = (*vB).b[AV_BINDEX(i*2)];
- prod = a * b;
- (*vS).h[AV_HINDEX(i)] = prod;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.584:VX:av:vmuleuh %VD, %VA, %VB:Vector Multiply Even Unsigned Half Word
- /* Word i = product of the even-numbered unsigned half words. */
- int i;
- uint16_t a, b;
- uint32_t prod;
- for (i = 0; i < 4; i++) {
- a = (*vA).h[AV_HINDEX(i*2)];
- b = (*vB).h[AV_HINDEX(i*2)];
- prod = a * b;
- (*vS).w[i] = prod;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.264:VX:av:vmulosb %VD, %VA, %VB:Vector Multiply Odd Signed Byte
- /* Half-word i = product of the odd-numbered signed bytes. */
- int i;
- int8_t a, b;
- int16_t prod;
- for (i = 0; i < 8; i++) {
- a = (*vA).b[AV_BINDEX((i*2)+1)];
- b = (*vB).b[AV_BINDEX((i*2)+1)];
- prod = a * b;
- (*vS).h[AV_HINDEX(i)] = prod;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.328:VX:av:vmulosh %VD, %VA, %VB:Vector Multiply Odd Signed Half Word
- /* Word i = product of the odd-numbered signed half words. */
- int i;
- int16_t a, b;
- int32_t prod;
- for (i = 0; i < 4; i++) {
- a = (*vA).h[AV_HINDEX((i*2)+1)];
- b = (*vB).h[AV_HINDEX((i*2)+1)];
- prod = a * b;
- (*vS).w[i] = prod;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.8:VX:av:vmuloub %VD, %VA, %VB:Vector Multiply Odd Unsigned Byte
- /* Half-word i = product of the odd-numbered unsigned bytes. */
- int i;
- uint8_t a, b;
- uint16_t prod;
- for (i = 0; i < 8; i++) {
- a = (*vA).b[AV_BINDEX((i*2)+1)];
- b = (*vB).b[AV_BINDEX((i*2)+1)];
- prod = a * b;
- (*vS).h[AV_HINDEX(i)] = prod;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.72:VX:av:vmulouh %VD, %VA, %VB:Vector Multiply Odd Unsigned Half Word
- /* Word i = product of the odd-numbered unsigned half words. */
- int i;
- uint16_t a, b;
- uint32_t prod;
- for (i = 0; i < 4; i++) {
- a = (*vA).h[AV_HINDEX((i*2)+1)];
- b = (*vB).h[AV_HINDEX((i*2)+1)];
- prod = a * b;
- (*vS).w[i] = prod;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Negative Multiply-Subtract instruction, 6-109
- #
- 0.4,6.VS,11.VA,16.VB,21.VC,26.47:VAX:av:vnmsubfp %VD, %VA, %VB, %VC:Vector Negative Multiply-Subtract Floating Point
- /* vS.w[i] = -((vA.w[i] * vC.w[i]) - vB.w[i]).  Form tag fixed from VX to
-    VAX: this record has a VC field at bit 26, like vmaddfp/vsel/vmsum*. */
- int i;
- uint32_t f;
- sim_fpu a, b, c, d, i1, i2;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&a, (*vA).w[i]);
- sim_fpu_32to (&b, (*vB).w[i]);
- sim_fpu_32to (&c, (*vC).w[i]);
- sim_fpu_mul (&i1, &a, &c);
- sim_fpu_sub (&i2, &i1, &b);
- sim_fpu_neg (&d, &i2);
- sim_fpu_to32 (&f, &d);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- #
- # Vector Logical OR instructions, 6-110, 6-111, 6-177
- #
- 0.4,6.VS,11.VA,16.VB,21.1284:VX:av:vnor %VD, %VA, %VB:Vector Logical NOR
- /* Bitwise NOR over the whole register, a word at a time. */
- int i;
- for (i = 0; i < 4; i++)
- (*vS).w[i] = ~((*vA).w[i] | (*vB).w[i]);
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1156:VX:av:vor %VD, %VA, %VB:Vector Logical OR
- /* Bitwise OR over the whole register. */
- int i;
- for (i = 0; i < 4; i++)
- (*vS).w[i] = (*vA).w[i] | (*vB).w[i];
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1220:VX:av:vxor %VD, %VA, %VB:Vector Logical XOR
- /* Bitwise XOR over the whole register. */
- int i;
- for (i = 0; i < 4; i++)
- (*vS).w[i] = (*vA).w[i] ^ (*vB).w[i];
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Permute instruction, 6-112
- #
- 0.4,6.VS,11.VA,16.VB,21.VC,26.43:VAX:av:vperm %VD, %VA, %VB, %VC:Vector Permute
- /* Form tag fixed from VX to VAX: this record has a VC field at bit 26,
-    like vsel and vmaddfp. */
- int i, who;
- /* The permutation vector might have us read into the source vectors
- back at positions before the iteration index, so we must latch the
- sources to prevent early-clobbering in case the destination vector
- is the same as one of them. */
- vreg myvA = (*vA), myvB = (*vB);
- for (i = 0; i < 16; i++) {
- who = (*vC).b[AV_BINDEX(i)] & 0x1f;
- if (who & 0x10)
- (*vS).b[AV_BINDEX(i)] = myvB.b[AV_BINDEX(who & 0xf)];
- else
- (*vS).b[AV_BINDEX(i)] = myvA.b[AV_BINDEX(who & 0xf)];
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- #
- # Vector Pack instructions, 6-113 ... 6-121
- #
- 0.4,6.VS,11.VA,16.VB,21.782:VX:av:vpkpx %VD, %VA, %VB:Vector Pack Pixel32
- /* Pack each 32-bit pixel to 1:5:5:5 — keep bit 24 region via the shifts
-    below; vA fills halves 0-3, vB fills halves 4-7. */
- int i;
- for (i = 0; i < 4; i++) {
- (*vS).h[AV_HINDEX(i+4)] = ((((*vB).w[i]) >> 9) & 0xfc00)
- | ((((*vB).w[i]) >> 6) & 0x03e0)
- | ((((*vB).w[i]) >> 3) & 0x001f);
- (*vS).h[AV_HINDEX(i)] = ((((*vA).w[i]) >> 9) & 0xfc00)
- | ((((*vA).w[i]) >> 6) & 0x03e0)
- | ((((*vA).w[i]) >> 3) & 0x001f);
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.398:VX:av:vpkshss %VD, %VA, %VB:Vector Pack Signed Half Word Signed Saturate
- /* Bytes 0-7 from vA's halves, 8-15 from vB's, each saturated to int8;
-    saturation folded into VSCR[SAT]. */
- int i, sat, tempsat;
- int16_t temp;
- sat = 0;
- for (i = 0; i < 16; i++) {
- if (i < 8)
- temp = (*vA).h[AV_HINDEX(i)];
- else
- temp = (*vB).h[AV_HINDEX(i-8)];
- (*vS).b[AV_BINDEX(i)] = altivec_signed_saturate_8(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.270:VX:av:vpkshus %VD, %VA, %VB:Vector Pack Signed Half Word Unsigned Saturate
- /* As vpkshss but saturated to uint8. */
- int i, sat, tempsat;
- int16_t temp;
- sat = 0;
- for (i = 0; i < 16; i++) {
- if (i < 8)
- temp = (*vA).h[AV_HINDEX(i)];
- else
- temp = (*vB).h[AV_HINDEX(i-8)];
- (*vS).b[AV_BINDEX(i)] = altivec_unsigned_saturate_8(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.462:VX:av:vpkswss %VD, %VA, %VB:Vector Pack Signed Word Signed Saturate
- /* Halves 0-3 from vA's words, 4-7 from vB's, saturated to int16. */
- int i, sat, tempsat;
- int32_t temp;
- sat = 0;
- for (i = 0; i < 8; i++) {
- if (i < 4)
- temp = (*vA).w[i];
- else
- temp = (*vB).w[i-4];
- (*vS).h[AV_HINDEX(i)] = altivec_signed_saturate_16(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.334:VX:av:vpkswus %VD, %VA, %VB:Vector Pack Signed Word Unsigned Saturate
- /* As vpkswss but saturated to uint16. */
- int i, sat, tempsat;
- int32_t temp;
- sat = 0;
- for (i = 0; i < 8; i++) {
- if (i < 4)
- temp = (*vA).w[i];
- else
- temp = (*vB).w[i-4];
- (*vS).h[AV_HINDEX(i)] = altivec_unsigned_saturate_16(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.14:VX:av:vpkuhum %VD, %VA, %VB:Vector Pack Unsigned Half Word Unsigned Modulo
- /* Truncating pack: low byte of each half word, vA then vB. */
- int i;
- for (i = 0; i < 16; i++)
- if (i < 8)
- (*vS).b[AV_BINDEX(i)] = (*vA).h[AV_HINDEX(i)];
- else
- (*vS).b[AV_BINDEX(i)] = (*vB).h[AV_HINDEX(i-8)];
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.142:VX:av:vpkuhus %VD, %VA, %VB:Vector Pack Unsigned Half Word Unsigned Saturate
- /* Unsigned-saturating pack of unsigned half words to bytes. */
- int i, sat, tempsat;
- int16_t temp;
- sat = 0;
- for (i = 0; i < 16; i++) {
- if (i < 8)
- temp = (*vA).h[AV_HINDEX(i)];
- else
- temp = (*vB).h[AV_HINDEX(i-8)];
- /* force positive in int16_t, ok as we'll toss the bit away anyway */
- temp &= ~0x8000;
- (*vS).b[AV_BINDEX(i)] = altivec_unsigned_saturate_8(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.78:VX:av:vpkuwum %VD, %VA, %VB:Vector Pack Unsigned Word Unsigned Modulo
- /* Truncating pack: halves 0-3 take the low 16 bits of vA's four words,
-    halves 4-7 take vB's.  The split is at 4 (words per register) — the
-    old "i < 8" test made the vB branch unreachable and read vA.w[4..7]
-    out of bounds (cf. vpkuwus below). */
- int i;
- for (i = 0; i < 8; i++)
- if (i < 4)
- (*vS).h[AV_HINDEX(i)] = (*vA).w[i];
- else
- (*vS).h[AV_HINDEX(i)] = (*vB).w[i-4];
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.206:VX:av:vpkuwus %VD, %VA, %VB:Vector Pack Unsigned Word Unsigned Saturate
- /* Unsigned-saturating pack of unsigned words to half words; saturation
-    folded into VSCR[SAT]. */
- int i, sat, tempsat;
- int32_t temp;
- sat = 0;
- for (i = 0; i < 8; i++) {
- if (i < 4)
- temp = (*vA).w[i];
- else
- temp = (*vB).w[i-4];
- /* force positive in int32_t, ok as we'll toss the bit away anyway */
- temp &= ~0x80000000;
- (*vS).h[AV_HINDEX(i)] = altivec_unsigned_saturate_16(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Reciprocal instructions, 6-122, 6-123, 6-131
- #
- 0.4,6.VS,11.0,16.VB,21.266:VX:av:vrefp %VD, %VB:Vector Reciprocal Estimate Floating Point
- /* Estimate implemented as an exact divide: vS.w[i] = 1.0 / vB.w[i]. */
- int i;
- uint32_t f;
- sim_fpu op, d;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&op, (*vB).w[i]);
- sim_fpu_div (&d, &sim_fpu_one, &op);
- sim_fpu_to32 (&f, &d);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.0,16.VB,21.330:VX:av:vrsqrtefp %VD, %VB:Vector Reciprocal Square Root Estimate Floating Point
- /* Estimate implemented exactly: vS.w[i] = 1.0 / sqrt(vB.w[i]). */
- int i;
- uint32_t f;
- sim_fpu op, i1, one, d;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&op, (*vB).w[i]);
- sim_fpu_sqrt (&i1, &op);
- sim_fpu_div (&d, &sim_fpu_one, &i1);
- sim_fpu_to32 (&f, &d);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- #
- # Vector Round instructions, 6-124 ... 6-127
- #
- 0.4,6.VS,11.0,16.VB,21.714:VX:av:vrfim %VD, %VB:Vector Round to Floating-Point Integer towards Minus Infinity
- /* Round each word to an integral FP value, rounding down (floor). */
- int i;
- uint32_t f;
- sim_fpu op;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&op, (*vB).w[i]);
- sim_fpu_round_32(&op, sim_fpu_round_down, sim_fpu_denorm_default);
- sim_fpu_to32 (&f, &op);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.0,16.VB,21.522:VX:av:vrfin %VD, %VB:Vector Round to Floating-Point Integer Nearest
- /* Round each word to the nearest integral FP value. */
- int i;
- uint32_t f;
- sim_fpu op;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&op, (*vB).w[i]);
- sim_fpu_round_32(&op, sim_fpu_round_near, sim_fpu_denorm_default);
- sim_fpu_to32 (&f, &op);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.0,16.VB,21.650:VX:av:vrfip %VD, %VB:Vector Round to Floating-Point Integer towards Plus Infinity
- /* Round each word up (ceiling). */
- int i;
- uint32_t f;
- sim_fpu op;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&op, (*vB).w[i]);
- sim_fpu_round_32(&op, sim_fpu_round_up, sim_fpu_denorm_default);
- sim_fpu_to32 (&f, &op);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.0,16.VB,21.586:VX:av:vrfiz %VD, %VB:Vector Round to Floating-Point Integer towards Zero
- /* Round each word towards zero (truncate). */
- int i;
- uint32_t f;
- sim_fpu op;
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&op, (*vB).w[i]);
- sim_fpu_round_32(&op, sim_fpu_round_zero, sim_fpu_denorm_default);
- sim_fpu_to32 (&f, &op);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- #
- # Vector Rotate Left instructions, 6-128 ... 6-130
- #
- 0.4,6.VS,11.VA,16.VB,21.4:VX:av:vrlb %VD, %VA, %VB:Vector Rotate Left Integer Byte
- /* Rotate each byte left by vB.b[i] & 7: shift into a double-width temp,
-    then OR the low part with the bits that spilled into the high part. */
- int i;
- uint16_t temp;
- for (i = 0; i < 16; i++) {
- temp = (uint16_t)(*vA).b[i] << (((*vB).b[i]) & 7);
- (*vS).b[i] = (temp & 0xff) | ((temp >> 8) & 0xff);
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.68:VX:av:vrlh %VD, %VA, %VB:Vector Rotate Left Integer Half Word
- /* Same double-width rotate trick for 16-bit elements. */
- int i;
- uint32_t temp;
- for (i = 0; i < 8; i++) {
- temp = (uint32_t)(*vA).h[i] << (((*vB).h[i]) & 0xf);
- (*vS).h[i] = (temp & 0xffff) | ((temp >> 16) & 0xffff);
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.132:VX:av:vrlw %VD, %VA, %VB:Vector Rotate Left Integer Word
- /* Same double-width rotate trick for 32-bit elements. */
- int i;
- uint64_t temp;
- for (i = 0; i < 4; i++) {
- temp = (uint64_t)(*vA).w[i] << (((*vB).w[i]) & 0x1f);
- (*vS).w[i] = (temp & 0xffffffff) | ((temp >> 32) & 0xffffffff);
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Conditional Select instruction, 6-133
- #
- 0.4,6.VS,11.VA,16.VB,21.VC,26.42:VAX:av:vsel %VD, %VA, %VB, %VC:Vector Conditional Select
- /* Bitwise select: where a vC bit is 1 take vB's bit, else vA's. */
- int i;
- uint32_t c;
- for (i = 0; i < 4; i++) {
- c = (*vC).w[i];
- (*vS).w[i] = ((*vB).w[i] & c) | ((*vA).w[i] & ~c);
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
- #
- # Vector Shift Left instructions, 6-134 ... 6-139
- #
- 0.4,6.VS,11.VA,16.VB,21.452:VX:av:vsl %VD, %VA, %VB:Vector Shift Left
- /* 128-bit shift left by 0..7 bits, carrying bits across word boundaries
-    from least to most significant word. */
- int sh, i, j, carry, new_carry;
- sh = (*vB).b[0] & 7; /* don't bother checking everything */
- carry = 0;
- for (j = 3; j >= 0; j--) {
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG)
- i = j;
- else
- i = (j + 2) % 4;
- /* guard sh == 0: a 32-bit value shifted right by 32 is undefined behavior */
- new_carry = sh ? ((*vA).w[i] >> (32 - sh)) : 0;
- (*vS).w[i] = ((*vA).w[i] << sh) | carry;
- carry = new_carry;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.260:VX:av:vslb %VD, %VA, %VB:Vector Shift Left Integer Byte
- /* Shift each byte left by the low 3 bits of the matching vB byte. */
- int i, sh;
- for (i = 0; i < 16; i++) {
- sh = ((*vB).b[i]) & 7;
- (*vS).b[i] = (*vA).b[i] << sh;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.0,22.SH,26.44:VX:av:vsldoi %VD, %VA, %VB, %SH:Vector Shift Left Double by Octet Immediate
- /* Take 16 bytes of the 32-byte concatenation vA:vB starting at byte SH.
-    Mnemonic fixed from "vsldol" and the missing %SH operand added — the
-    architected form is vsldoi vD,vA,vB,SH. */
- int i, j;
- for (j = 0, i = SH; i < 16; i++)
- (*vS).b[j++] = (*vA).b[i];
- for (i = 0; i < SH; i++)
- (*vS).b[j++] = (*vB).b[i];
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.324:VX:av:vslh %VD, %VA, %VB:Vector Shift Left Half Word
- /* Shift each half word left by the low 4 bits of the matching vB half. */
- int i, sh;
- for (i = 0; i < 8; i++) {
- sh = ((*vB).h[i]) & 0xf;
- (*vS).h[i] = (*vA).h[i] << sh;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1036:VX:av:vslo %VD, %VA, %VB:Vector Shift Left by Octet
- int i, sh;
- /* Shift the whole 16-byte register left by sh bytes, zero-filling
-    from the right.  The byte count comes from the high bits of the
-    last logical byte of vB (storage position depends on the
-    simulated byte order).  */
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG)
- sh = ((*vB).b[AV_BINDEX(15)] >> 3) & 0xf;
- else
- sh = ((*vB).b[AV_BINDEX(0)] >> 3) & 0xf;
- for (i = 0; i < 16; i++) {
- /* Copy while the source byte i + sh is still inside the register
-    (i + sh <= 15).  The previous test "15 - i > sh" was off by one:
-    it also zeroed the byte with i + sh == 15, so e.g. a shift count
-    of 0 cleared byte 15 instead of copying vA verbatim.  */
- if (15 - i >= sh)
- (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX(i + sh)];
- else
- (*vS).b[AV_BINDEX(i)] = 0;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.388:VX:av:vslw %VD, %VA, %VB:Vector Shift Left Integer Word
- int i, sh;
- /* Per-word shift by the low 5 bits of vB's corresponding word.  */
- for (i = 0; i < 4; i++) {
- sh = ((*vB).w[i]) & 0x1f;
- (*vS).w[i] = (*vA).w[i] << sh;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Splat instructions, 6-140 ... 6-145
- #
- 0.4,6.VS,11.UIMM,16.VB,21.524:VX:av:vspltb %VD, %VB, %UIMM:Vector Splat Byte
- int i;
- uint8_t b;
- /* Replicate the UIMM-selected byte of vB into every byte of vD.  */
- b = (*vB).b[AV_BINDEX(UIMM & 0xf)];
- for (i = 0; i < 16; i++)
- (*vS).b[i] = b;
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.UIMM,16.VB,21.588:VX:av:vsplth %VD, %VB, %UIMM:Vector Splat Half Word
- int i;
- uint16_t h;
- /* Replicate the UIMM-selected halfword of vB into every halfword.  */
- h = (*vB).h[AV_HINDEX(UIMM & 0x7)];
- for (i = 0; i < 8; i++)
- (*vS).h[i] = h;
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.SIMM,16.0,21.780:VX:av:vspltisb %VD, %SIMM:Vector Splat Immediate Signed Byte
- int i;
- int8_t b = SIMM;
- /* manual 5-bit signed extension */
- if (b & 0x10)
- b -= 0x20;
- /* Replicate the sign-extended immediate into every byte.  */
- for (i = 0; i < 16; i++)
- (*vS).b[i] = b;
- PPC_INSN_VR(VS_BITMASK, 0);
- 0.4,6.VS,11.SIMM,16.0,21.844:VX:av:vspltish %VD, %SIMM:Vector Splat Immediate Signed Half Word
- int i;
- int16_t h = SIMM;
- /* manual 5-bit signed extension */
- if (h & 0x10)
- h -= 0x20;
- /* Replicate the sign-extended immediate into every halfword.  */
- for (i = 0; i < 8; i++)
- (*vS).h[i] = h;
- PPC_INSN_VR(VS_BITMASK, 0);
- 0.4,6.VS,11.SIMM,16.0,21.908:VX:av:vspltisw %VD, %SIMM:Vector Splat Immediate Signed Word
- int i;
- int32_t w = SIMM;
- /* manual 5-bit signed extension */
- if (w & 0x10)
- w -= 0x20;
- /* Replicate the sign-extended immediate into every word.  */
- for (i = 0; i < 4; i++)
- (*vS).w[i] = w;
- PPC_INSN_VR(VS_BITMASK, 0);
- 0.4,6.VS,11.UIMM,16.VB,21.652:VX:av:vspltw %VD, %VB, %UIMM:Vector Splat Word
- int i;
- uint32_t w;
- /* Replicate the UIMM-selected word of vB into every word.
-    NOTE(review): no index remapping here (unlike the byte/halfword
-    splats above) -- presumably words need none; confirm.  */
- w = (*vB).w[UIMM & 0x3];
- for (i = 0; i < 4; i++)
- (*vS).w[i] = w;
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- #
- # Vector Shift Right instructions, 6-146 ... 6-154
- #
- 0.4,6.VS,11.VA,16.VB,21.708:VX:av:vsr %VD, %VA, %VB:Vector Shift Right
- int sh, i, j, carry, new_carry;
- /* 128-bit right shift by 0-7 bits, performed word-by-word from the
-    most significant word down, propagating the bits shifted out of
-    each word into the next as a carry.  */
- sh = (*vB).b[0] & 7; /* don't bother checking everything */
- carry = 0;
- for (j = 0; j < 4; j++) {
- /* Map the logical word number to its storage index for the
-    simulated target's byte order (see the matching vsl code).  */
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG)
- i = j;
- else
- i = (j + 2) % 4;
- /* Guard sh == 0: a 32-bit shift by 32 is undefined behavior in C,
-    and on hosts where it acts as a no-op it injected the whole word
-    as a bogus carry.  With no shift there is nothing to carry.  */
- new_carry = sh ? (*vA).w[i] << (32 - sh) : 0;
- (*vS).w[i] = ((*vA).w[i] >> sh) | carry;
- carry = new_carry;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.772:VX:av:vsrab %VD, %VA, %VB:Vector Shift Right Algebraic Byte
- int i, sh;
- int16_t a;
- /* Arithmetic per-byte shift: widen each byte to a signed 16-bit
-    value so the right shift replicates the sign bit.  */
- for (i = 0; i < 16; i++) {
- sh = ((*vB).b[i]) & 7;
- a = (int16_t)(int8_t)(*vA).b[i];
- (*vS).b[i] = (a >> sh) & 0xff;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.836:VX:av:vsrah %VD, %VA, %VB:Vector Shift Right Algebraic Half Word
- int i, sh;
- int32_t a;
- /* Arithmetic per-halfword shift: widen to signed 32 bits so the
-    right shift replicates the sign bit.  */
- for (i = 0; i < 8; i++) {
- sh = ((*vB).h[i]) & 0xf;
- a = (int32_t)(int16_t)(*vA).h[i];
- (*vS).h[i] = (a >> sh) & 0xffff;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.900:VX:av:vsraw %VD, %VA, %VB:Vector Shift Right Algebraic Word
- int i, sh;
- int64_t a;
- /* Arithmetic per-word shift: widen to signed 64 bits so the right
-    shift replicates the sign bit.  */
- for (i = 0; i < 4; i++) {
- /* A word shift amount is 5 bits (0-31); the old 0xf mask dropped
-    bit 4 of the count.  0x1f matches vslw/vsrw.  */
- sh = ((*vB).w[i]) & 0x1f;
- a = (int64_t)(int32_t)(*vA).w[i];
- (*vS).w[i] = (a >> sh) & 0xffffffff;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.516:VX:av:vsrb %VD, %VA, %VB:Vector Shift Right Byte
- int i, sh;
- /* Logical per-byte shift by the low 3 bits of vB's byte.  */
- for (i = 0; i < 16; i++) {
- sh = ((*vB).b[i]) & 7;
- (*vS).b[i] = (*vA).b[i] >> sh;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.580:VX:av:vsrh %VD, %VA, %VB:Vector Shift Right Half Word
- int i, sh;
- /* Logical per-halfword shift by the low 4 bits of vB's halfword.  */
- for (i = 0; i < 8; i++) {
- sh = ((*vB).h[i]) & 0xf;
- (*vS).h[i] = (*vA).h[i] >> sh;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1100:VX:av:vsro %VD, %VA, %VB:Vector Shift Right Octet
- int i, sh;
- /* Shift the whole register right by sh bytes, zero-filling from the
-    left; byte count taken from vB as in vslo.  */
- if (CURRENT_TARGET_BYTE_ORDER == BFD_ENDIAN_BIG)
- sh = ((*vB).b[AV_BINDEX(15)] >> 3) & 0xf;
- else
- sh = ((*vB).b[AV_BINDEX(0)] >> 3) & 0xf;
- for (i = 0; i < 16; i++) {
- if (i < sh)
- (*vS).b[AV_BINDEX(i)] = 0;
- else
- (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX(i - sh)];
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.644:VX:av:vsrw %VD, %VA, %VB:Vector Shift Right Word
- int i, sh;
- /* Logical per-word shift by the low 5 bits of vB's word.  */
- for (i = 0; i < 4; i++) {
- sh = ((*vB).w[i]) & 0x1f;
- (*vS).w[i] = (*vA).w[i] >> sh;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Subtract instructions, 6-155 ... 6-165
- #
- 0.4,6.VS,11.VA,16.VB,21.1408:VX:av:vsubcuw %VD, %VA, %VB:Vector Subtract Carryout Unsigned Word
- int i;
- int64_t temp, a, b;
- /* Per-word carry-out of a - b: temp >> 32 is -1 exactly when the
-    unsigned subtraction borrows, so ~(temp >> 32) & 1 is 1 when
-    a >= b (no borrow) and 0 otherwise.  */
- for (i = 0; i < 4; i++) {
- a = (int64_t)(uint32_t)(*vA).w[i];
- b = (int64_t)(uint32_t)(*vB).w[i];
- temp = a - b;
- (*vS).w[i] = ~(temp >> 32) & 1;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.74:VX:av:vsubfp %VD, %VA, %VB:Vector Subtract Floating Point
- int i;
- uint32_t f;
- sim_fpu a, b, d;
- /* Per-word single-precision subtract via the sim_fpu soft-float
-    helpers (unpack both operands, subtract, repack).  */
- for (i = 0; i < 4; i++) {
- sim_fpu_32to (&a, (*vA).w[i]);
- sim_fpu_32to (&b, (*vB).w[i]);
- sim_fpu_sub (&d, &a, &b);
- sim_fpu_to32 (&f, &d);
- (*vS).w[i] = f;
- }
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1792:VX:av:vsubsbs %VD, %VA, %VB:Vector Subtract Signed Byte Saturate
- int i, sat, tempsat;
- int16_t temp;
- /* sat accumulates the per-element saturation flags for VSCR[SAT].  */
- sat = 0;
- for (i = 0; i < 16; i++) {
- temp = (int16_t)(int8_t)(*vA).b[i] - (int16_t)(int8_t)(*vB).b[i];
- (*vS).b[i] = altivec_signed_saturate_8(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1856:VX:av:vsubshs %VD, %VA, %VB:Vector Subtract Signed Half Word Saturate
- int i, sat, tempsat;
- int32_t temp;
- /* sat accumulates the per-element saturation flags for VSCR[SAT].  */
- sat = 0;
- for (i = 0; i < 8; i++) {
- temp = (int32_t)(int16_t)(*vA).h[i] - (int32_t)(int16_t)(*vB).h[i];
- (*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1920:VX:av:vsubsws %VD, %VA, %VB:Vector Subtract Signed Word Saturate
- int i, sat, tempsat;
- int64_t temp;
- /* sat accumulates the per-element saturation flags for VSCR[SAT].  */
- sat = 0;
- for (i = 0; i < 4; i++) {
- temp = (int64_t)(int32_t)(*vA).w[i] - (int64_t)(int32_t)(*vB).w[i];
- (*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1024:VX:av:vsububm %VD, %VA, %VB:Vector Subtract Unsigned Byte Modulo
- int i;
- /* Modulo (wrapping) per-byte subtract; no saturation, no VSCR.  */
- for (i = 0; i < 16; i++)
- (*vS).b[i] = (*vA).b[i] - (*vB).b[i];
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1536:VX:av:vsububs %VD, %VA, %VB:Vector Subtract Unsigned Byte Saturate
- int i, sat, tempsat;
- int16_t temp;
- /* sat accumulates the per-element saturation flags for VSCR[SAT].  */
- sat = 0;
- for (i = 0; i < 16; i++) {
- temp = (int16_t)(uint8_t)(*vA).b[i] - (int16_t)(uint8_t)(*vB).b[i];
- (*vS).b[i] = altivec_unsigned_saturate_8(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1088:VX:av:vsubuhm %VD, %VA, %VB:Vector Subtract Unsigned Half Word Modulo
- int i;
- /* Modulo (wrapping) per-halfword subtract; no saturation.  */
- for (i = 0; i < 8; i++)
- (*vS).h[i] = ((*vA).h[i] - (*vB).h[i]) & 0xffff;
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1600:VX:av:vsubuhs %VD, %VA, %VB:Vector Subtract Unsigned Half Word Saturate
- int i, sat, tempsat;
- int32_t temp;
- /* sat accumulates the per-element saturation flags for VSCR[SAT];
-    it must start at 0 -- it was previously read uninitialized via
-    "sat |= tempsat" (undefined behavior), unlike sibling vsububs.  */
- sat = 0;
- for (i = 0; i < 8; i++) {
- temp = (int32_t)(uint16_t)(*vA).h[i] - (int32_t)(uint16_t)(*vB).h[i];
- (*vS).h[i] = altivec_unsigned_saturate_16(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1152:VX:av:vsubuwm %VD, %VA, %VB:Vector Subtract Unsigned Word Modulo
- int i;
- /* Modulo (wrapping) per-word subtract; no saturation.  */
- for (i = 0; i < 4; i++)
- (*vS).w[i] = (*vA).w[i] - (*vB).w[i];
- PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1664:VX:av:vsubuws %VD, %VA, %VB:Vector Subtract Unsigned Word Saturate
- int i, sat, tempsat;
- int64_t temp;
- /* sat accumulates the per-element saturation flags for VSCR[SAT];
-    it must start at 0 -- it was previously read uninitialized via
-    "sat |= tempsat" (undefined behavior).  */
- sat = 0;
- for (i = 0; i < 4; i++) {
- temp = (int64_t)(uint32_t)(*vA).w[i] - (int64_t)(uint32_t)(*vB).w[i];
- (*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Sum instructions, 6-166 ... 6-170
- #
- 0.4,6.VS,11.VA,16.VB,21.1928:VX:av:vsumsws %VD, %VA, %VB:Vector Sum Across Signed Word Saturate
- int i, sat;
- int64_t temp;
- /* Full cross-element sum: vB.w[3] plus all four words of vA,
-    saturated into vD.w[3]; the other words are cleared.  */
- temp = (int64_t)(int32_t)(*vB).w[3];
- for (i = 0; i < 4; i++)
- temp += (int64_t)(int32_t)(*vA).w[i];
- (*vS).w[3] = altivec_signed_saturate_32(temp, &sat);
- (*vS).w[0] = (*vS).w[1] = (*vS).w[2] = 0;
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1672:VX:av:vsum2sws %VD, %VA, %VB:Vector Sum Across Partial (1/2) Signed Word Saturate
- int j, sat, tempsat;
- int64_t temp;
- /* Sum each pair of words of vA into the odd word of vD, seeded by
-    the matching odd word of vB.  sat must start at 0 -- it was
-    previously read uninitialized via "sat |= tempsat" (undefined
-    behavior).  The unused local 'i' was removed.  */
- sat = 0;
- for (j = 0; j < 4; j += 2) {
- temp = (int64_t)(int32_t)(*vB).w[j+1];
- temp += (int64_t)(int32_t)(*vA).w[j] + (int64_t)(int32_t)(*vA).w[j+1];
- (*vS).w[j+1] = altivec_signed_saturate_32(temp, &tempsat);
- sat |= tempsat;
- }
- (*vS).w[0] = (*vS).w[2] = 0;
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1800:VX:av:vsum4sbs %VD, %VA, %VB:Vector Sum Across Partial (1/4) Signed Byte Saturate
- int i, j, sat, tempsat;
- int64_t temp;
- /* For each word j, sum the four signed bytes of vA in that word,
-    seeded by vB.w[j], with saturation.  sat must start at 0 -- it
-    was previously read uninitialized via "sat |= tempsat".  */
- sat = 0;
- for (j = 0; j < 4; j++) {
- temp = (int64_t)(int32_t)(*vB).w[j];
- for (i = 0; i < 4; i++)
- temp += (int64_t)(int8_t)(*vA).b[i+(j*4)];
- (*vS).w[j] = altivec_signed_saturate_32(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1608:VX:av:vsum4shs %VD, %VA, %VB:Vector Sum Across Partial (1/4) Signed Half Word Saturate
- int i, j, sat, tempsat;
- int64_t temp;
- /* For each word j, sum the two signed halfwords of vA in that word,
-    seeded by vB.w[j], with saturation.  sat must start at 0 -- it
-    was previously read uninitialized via "sat |= tempsat".  */
- sat = 0;
- for (j = 0; j < 4; j++) {
- temp = (int64_t)(int32_t)(*vB).w[j];
- for (i = 0; i < 2; i++)
- temp += (int64_t)(int16_t)(*vA).h[i+(j*2)];
- (*vS).w[j] = altivec_signed_saturate_32(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- 0.4,6.VS,11.VA,16.VB,21.1544:VX:av:vsum4ubs %VD, %VA, %VB:Vector Sum Across Partial (1/4) Unsigned Byte Saturate
- int i, j, sat, tempsat;
- int64_t utemp;
- int64_t temp;
- /* For each word j, sum the four unsigned bytes of vA in that word,
-    seeded by vB.w[j], with unsigned saturation.  sat must start at 0
-    -- it was previously read uninitialized via "sat |= tempsat".  */
- sat = 0;
- for (j = 0; j < 4; j++) {
- utemp = (int64_t)(uint32_t)(*vB).w[j];
- for (i = 0; i < 4; i++)
- utemp += (int64_t)(uint16_t)(*vA).b[i+(j*4)];
- temp = utemp;
- (*vS).w[j] = altivec_unsigned_saturate_32(temp, &tempsat);
- sat |= tempsat;
- }
- ALTIVEC_SET_SAT(sat);
- PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
- #
- # Vector Unpack instructions, 6-171 ... 6-176
- #
- 0.4,6.VS,11.0,16.VB,21.846:VX:av:vupkhpx %VD, %VB:Vector Unpack High Pixel16
- int i;
- uint16_t h;
- /* Expand each of the four high 1/5/5/5 pixels of vB into a word:
-    sign-extend the 1-bit alpha to 8 bits, then place the three
-    5-bit channels in the low bytes of the result word.  */
- for (i = 0; i < 4; i++) {
- h = (*vB).h[AV_HINDEX(i)];
- (*vS).w[i] = ((h & 0x8000) ? 0xff000000 : 0)
- | ((h & 0x7c00) << 6)
- | ((h & 0x03e0) << 3)
- | ((h & 0x001f));
- }
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.0,16.VB,21.526:VX:av:vupkhsb %VD, %VB:Vector Unpack High Signed Byte
- int i;
- /* Sign-extend the eight high bytes of vB to halfwords.  */
- for (i = 0; i < 8; i++)
- (*vS).h[AV_HINDEX(i)] = (int16_t)(int8_t)(*vB).b[AV_BINDEX(i)];
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.0,16.VB,21.590:VX:av:vupkhsh %VD, %VB:Vector Unpack High Signed Half Word
- int i;
- /* Sign-extend the four high halfwords of vB to words.  */
- for (i = 0; i < 4; i++)
- (*vS).w[i] = (int32_t)(int16_t)(*vB).h[AV_HINDEX(i)];
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.0,16.VB,21.974:VX:av:vupklpx %VD, %VB:Vector Unpack Low Pixel16
- int i;
- uint16_t h;
- /* As vupkhpx, but for the four low 1/5/5/5 pixels of vB.  */
- for (i = 0; i < 4; i++) {
- h = (*vB).h[AV_HINDEX(i + 4)];
- (*vS).w[i] = ((h & 0x8000) ? 0xff000000 : 0)
- | ((h & 0x7c00) << 6)
- | ((h & 0x03e0) << 3)
- | ((h & 0x001f));
- }
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.0,16.VB,21.654:VX:av:vupklsb %VD, %VB:Vector Unpack Low Signed Byte
- int i;
- /* Sign-extend the eight low bytes of vB to halfwords.  */
- for (i = 0; i < 8; i++)
- (*vS).h[AV_HINDEX(i)] = (int16_t)(int8_t)(*vB).b[AV_BINDEX(i + 8)];
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
- 0.4,6.VS,11.0,16.VB,21.718:VX:av:vupklsh %VD, %VB:Vector Unpack Low Signed Half Word
- int i;
- /* Sign-extend the four low halfwords of vB to words.  */
- for (i = 0; i < 4; i++)
- (*vS).w[i] = (int32_t)(int16_t)(*vB).h[AV_HINDEX(i + 4)];
- PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
|