diff options
Diffstat (limited to 'gcc/config/rs6000')
-rw-r--r-- | gcc/config/rs6000/altivec.h | 26 | ||||
-rw-r--r-- | gcc/config/rs6000/altivec.md | 53 | ||||
-rw-r--r-- | gcc/config/rs6000/builtin.ops | 297 | ||||
-rw-r--r-- | gcc/config/rs6000/darwin-fpsave.asm | 69 | ||||
-rw-r--r-- | gcc/config/rs6000/darwin-vecsave.asm | 133 | ||||
-rw-r--r-- | gcc/config/rs6000/darwin-worldsave.asm | 233 | ||||
-rw-r--r-- | gcc/config/rs6000/darwin.h | 160 | ||||
-rwxr-xr-x | gcc/config/rs6000/ops-to-gp | 620 | ||||
-rw-r--r-- | gcc/config/rs6000/rs6000-c.c | 123 | ||||
-rw-r--r-- | gcc/config/rs6000/rs6000-protos.h | 12 | ||||
-rw-r--r-- | gcc/config/rs6000/rs6000.c | 960 | ||||
-rw-r--r-- | gcc/config/rs6000/rs6000.h | 67 | ||||
-rw-r--r-- | gcc/config/rs6000/rs6000.md | 281 | ||||
-rw-r--r-- | gcc/config/rs6000/sysv4.h | 7 | ||||
-rw-r--r-- | gcc/config/rs6000/t-darwin | 8 | ||||
-rw-r--r-- | gcc/config/rs6000/t-linux64 | 6 | ||||
-rw-r--r-- | gcc/config/rs6000/t-rs6000 | 1 | ||||
-rw-r--r-- | gcc/config/rs6000/t-rtems | 86 | ||||
-rw-r--r-- | gcc/config/rs6000/vec.h | 4515 | ||||
-rw-r--r-- | gcc/config/rs6000/vec.ops | 1025 |
20 files changed, 8411 insertions, 271 deletions
diff --git a/gcc/config/rs6000/altivec.h b/gcc/config/rs6000/altivec.h index 2ae567ef3d4..07278e95789 100644 --- a/gcc/config/rs6000/altivec.h +++ b/gcc/config/rs6000/altivec.h @@ -36,10 +36,11 @@ #error Use the "-maltivec" flag to enable PowerPC AltiVec support #endif -/* You are allowed to undef these for C++ compatibility. */ -#define vector __vector -#define pixel __pixel -#define bool __bool +/* APPLE LOCAL begin AltiVec */ +/* The keywords 'vector', 'pixel' and 'bool' are now implemented as + context-sensitive macros, and hence should not be defined + unconditionally. */ +/* APPLE LOCAL end AltiVec */ /* Condition register codes for AltiVec predicates. */ @@ -117,7 +118,7 @@ inline void vec_dst (const vector unsigned int *, int, const int) __attribute__ inline void vec_dst (const vector signed int *, int, const int) __attribute__ ((always_inline)); inline void vec_dst (const vector bool int *, int, const int) __attribute__ ((always_inline)); inline void vec_dst (const vector float *, int, const int) __attribute__ ((always_inline)); -inline void vec_dst (const int *, int, const int) __attribute__ ((always_inline)); +inline void vec_dst (const unsigned char *, int, const int) __attribute__ ((always_inline)); inline void vec_dst (const signed char *, int, const int) __attribute__ ((always_inline)); inline void vec_dst (const unsigned short *, int, const int) __attribute__ ((always_inline)); inline void vec_dst (const short *, int, const int) __attribute__ ((always_inline)); @@ -138,7 +139,7 @@ inline void vec_dstst (const vector unsigned int *, int, const int) __attribute_ inline void vec_dstst (const vector signed int *, int, const int) __attribute__ ((always_inline)); inline void vec_dstst (const vector bool int *, int, const int) __attribute__ ((always_inline)); inline void vec_dstst (const vector float *, int, const int) __attribute__ ((always_inline)); -inline void vec_dstst (const int *, int, const int) __attribute__ ((always_inline)); +inline void 
vec_dstst (const unsigned char *, int, const int) __attribute__ ((always_inline)); inline void vec_dstst (const signed char *, int, const int) __attribute__ ((always_inline)); inline void vec_dstst (const unsigned short *, int, const int) __attribute__ ((always_inline)); inline void vec_dstst (const short *, int, const int) __attribute__ ((always_inline)); @@ -159,7 +160,7 @@ inline void vec_dststt (const vector unsigned int *, int, const int) __attribute inline void vec_dststt (const vector signed int *, int, const int) __attribute__ ((always_inline)); inline void vec_dststt (const vector bool int *, int, const int) __attribute__ ((always_inline)); inline void vec_dststt (const vector float *, int, const int) __attribute__ ((always_inline)); -inline void vec_dststt (const int *, int, const int) __attribute__ ((always_inline)); +inline void vec_dststt (const unsigned char *, int, const int) __attribute__ ((always_inline)); inline void vec_dststt (const signed char *, int, const int) __attribute__ ((always_inline)); inline void vec_dststt (const unsigned short *, int, const int) __attribute__ ((always_inline)); inline void vec_dststt (const short *, int, const int) __attribute__ ((always_inline)); @@ -180,7 +181,7 @@ inline void vec_dstt (const vector unsigned int *, int, const int) __attribute__ inline void vec_dstt (const vector signed int *, int, const int) __attribute__ ((always_inline)); inline void vec_dstt (const vector bool int *, int, const int) __attribute__ ((always_inline)); inline void vec_dstt (const vector float *, int, const int) __attribute__ ((always_inline)); -inline void vec_dstt (const int *, int, const int) __attribute__ ((always_inline)); +inline void vec_dstt (const unsigned char *, int, const int) __attribute__ ((always_inline)); inline void vec_dstt (const signed char *, int, const int) __attribute__ ((always_inline)); inline void vec_dstt (const unsigned short *, int, const int) __attribute__ ((always_inline)); inline void vec_dstt (const 
short *, int, const int) __attribute__ ((always_inline)); @@ -195,15 +196,20 @@ inline vector signed int vec_sld (vector signed int, vector signed int, const in inline vector unsigned int vec_sld (vector unsigned int, vector unsigned int, const int) __attribute__ ((always_inline)); inline vector signed short vec_sld (vector signed short, vector signed short, const int) __attribute__ ((always_inline)); inline vector unsigned short vec_sld (vector unsigned short, vector unsigned short, const int) __attribute__ ((always_inline)); +inline vector pixel vec_sld (vector pixel, vector pixel, const int) __attribute__ ((always_inline)); inline vector signed char vec_sld (vector signed char, vector signed char, const int) __attribute__ ((always_inline)); inline vector unsigned char vec_sld (vector unsigned char, vector unsigned char, const int) __attribute__ ((always_inline)); inline vector signed char vec_splat (vector signed char, const int) __attribute__ ((always_inline)); inline vector unsigned char vec_splat (vector unsigned char, const int) __attribute__ ((always_inline)); +inline vector bool char vec_splat (vector bool char, const int) __attribute__ ((always_inline)); inline vector signed short vec_splat (vector signed short, const int) __attribute__ ((always_inline)); inline vector unsigned short vec_splat (vector unsigned short, const int) __attribute__ ((always_inline)); +inline vector bool short vec_splat (vector bool short, const int) __attribute__ ((always_inline)); +inline vector pixel vec_splat (vector pixel, const int) __attribute__ ((always_inline)); inline vector float vec_splat (vector float, const int) __attribute__ ((always_inline)); inline vector signed int vec_splat (vector signed int, const int) __attribute__ ((always_inline)); inline vector unsigned int vec_splat (vector unsigned int, const int) __attribute__ ((always_inline)); +inline vector bool int vec_splat (vector bool int, const int) __attribute__ ((always_inline)); inline vector signed char 
vec_splat_s8 (const int) __attribute__ ((always_inline)); inline vector signed short vec_splat_s16 (const int) __attribute__ ((always_inline)); inline vector signed int vec_splat_s32 (const int) __attribute__ ((always_inline)); @@ -8897,7 +8903,7 @@ __ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \ ((vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \ __ch (__bin_args_eq (vector bool int, (a1), vector bool int, (a2)), \ ((vector bool int) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \ - __builtin_altivec_compiletime_error ("vec_mergeh"))))))))))) + __builtin_altivec_compiletime_error ("vec_mergeh")))))))))))) #define vec_vmrghw(a1, a2) \ __ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \ @@ -8945,7 +8951,7 @@ __ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \ ((vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \ __ch (__bin_args_eq (vector bool int, (a1), vector bool int, (a2)), \ ((vector bool int) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \ - __builtin_altivec_compiletime_error ("vec_mergel")))))))) + __builtin_altivec_compiletime_error ("vec_mergel")))))))))))) #define vec_vmrglw(a1, a2) \ __ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \ diff --git a/gcc/config/rs6000/altivec.md b/gcc/config/rs6000/altivec.md index 2ede79d9f5c..6a46328fb24 100644 --- a/gcc/config/rs6000/altivec.md +++ b/gcc/config/rs6000/altivec.md @@ -388,6 +388,22 @@ "vaddsws %0,%1,%2" [(set_attr "type" "vecsimple")]) +(define_insn "andv16qi3" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (and:V16QI (match_operand:V16QI 1 "register_operand" "v") + (match_operand:V16QI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vand %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "andv8hi3" + [(set (match_operand:V8HI 0 
"register_operand" "=v") + (and:V8HI (match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vand %0,%1,%2" + [(set_attr "type" "vecsimple")]) + (define_insn "andv4si3" [(set (match_operand:V4SI 0 "register_operand" "=v") (and:V4SI (match_operand:V4SI 1 "register_operand" "v") @@ -976,6 +992,43 @@ "vnor %0,%1,%2" [(set_attr "type" "vecsimple")]) +(define_insn "one_cmplv16qi2" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (not:V16QI (match_operand:V16QI 1 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vnot %0,%1" + [(set_attr "type" "vecsimple")]) + +(define_insn "one_cmplv8hi2" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (not:V8HI (match_operand:V8HI 1 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vnot %0,%1" + [(set_attr "type" "vecsimple")]) + +(define_insn "one_cmplv4si2" + [(set (match_operand:V4SI 0 "register_operand" "=v") + (not:V4SI (match_operand:V4SI 1 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vnot %0,%1" + [(set_attr "type" "vecsimple")]) + +(define_insn "iorv16qi3" + [(set (match_operand:V16QI 0 "register_operand" "=v") + (ior:V16QI (match_operand:V16QI 1 "register_operand" "v") + (match_operand:V16QI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vor %0,%1,%2" + [(set_attr "type" "vecsimple")]) + +(define_insn "iorv8hi3" + [(set (match_operand:V8HI 0 "register_operand" "=v") + (ior:V8HI (match_operand:V8HI 1 "register_operand" "v") + (match_operand:V8HI 2 "register_operand" "v")))] + "TARGET_ALTIVEC" + "vor %0,%1,%2" + [(set_attr "type" "vecsimple")]) + (define_insn "iorv4si3" [(set (match_operand:V4SI 0 "register_operand" "=v") (ior:V4SI (match_operand:V4SI 1 "register_operand" "v") diff --git a/gcc/config/rs6000/builtin.ops b/gcc/config/rs6000/builtin.ops new file mode 100644 index 00000000000..a28e35654fc --- /dev/null +++ b/gcc/config/rs6000/builtin.ops @@ -0,0 +1,297 @@ +# APPLE LOCAL file AltiVec +# ops-to-gp -gcc vec.ops builtin.ops +# @ 
betype betype-code type-spelling +@ @ float BETYPE_R4 float +@ @ ushort BETYPE_U4 unsigned=short +@ @ uint BETYPE_U4 unsigned=int +@ @ ulong BETYPE_U4 unsigned=long +@ @ immed_u2 U2 0..3 +@ @ immed_u4 U4 0..15 +@ @ immed_s5 I5 -16..15 +@ @ immed_u5 U5 0..31 +@ @ int BETYPE_I4 int +@ @ long BETYPE_I4 long +@ @ ptr PTR void=* +@ @ v16 BETYPE_V16 vec_type +@ @ void BETYPE_I4 void +# fetype betype [code [spelling]] +@ float_ptr ptr i float=* +@ const_float_ptr ptr i float=* +@ const_volatile_float_ptr ptr i float=* +@ int int i +@ int_ptr ptr i int=* +@ long_ptr ptr i long=* +@ const_int_ptr ptr i int=* +@ const_long_ptr ptr i long=* +@ const_volatile_int_ptr ptr i int=* +@ const_volatile_long_ptr ptr i long=* +@ immed_s5 immed_s5 A +@ immed_u5 immed_u5 B +@ immed_u4 immed_u4 C +@ immed_u2 immed_u2 D +@ cc24f int j=24=f +@ cc24fd int j=24=f=d +@ cc24fr int j=24=f=r +@ cc24t int j=24=t +@ cc24td int j=24=t=d +@ cc24tr int j=24=t=r +@ cc26f int j=26=f +@ cc26fd int j=26=f=d +@ cc26fr int j=26=f=r +@ cc26t int j=26=t +@ cc26td int j=26=t=d +@ cc26tr int j=26=t=r +@ short_ptr ptr i short=* +@ signed_char_ptr ptr i signed=char=* +@ unsigned_char_ptr ptr i unsigned=char=* +@ unsigned_short_ptr ptr i unsigned=short=* +@ unsigned_int_ptr ptr i unsigned=int=* +@ unsigned_long_ptr ptr i unsigned=long=* +@ const_short_ptr ptr i short=* +@ const_signed_char_ptr ptr i signed=char=* +@ const_unsigned_char_ptr ptr i unsigned=char=* +@ const_unsigned_short_ptr ptr i unsigned=short=* +@ const_unsigned_int_ptr ptr i unsigned=int=* +@ const_unsigned_long_ptr ptr i unsigned=long=* +@ const_volatile_short_ptr ptr i short=* +@ const_volatile_signed_char_ptr ptr i signed=char=* +@ const_volatile_unsigned_char_ptr ptr i unsigned=char=* +@ const_volatile_unsigned_short_ptr ptr i unsigned=short=* +@ const_volatile_unsigned_int_ptr ptr i unsigned=int=* +@ const_volatile_unsigned_long_ptr ptr i unsigned=long=* +@ vec_b16 v16 x vec_b16 +@ vec_b16_load_op v16 xl vec_b16 +@ vec_b16_ptr ptr i 
vec_b16=* +@ const_vec_b16_ptr ptr i vec_b16=* +@ vec_b32 v16 x vec_b32 +@ vec_b32_load_op v16 xl vec_b32 +@ vec_b32_ptr ptr i vec_b32=* +@ const_vec_b32_ptr ptr i vec_b32=* +@ vec_b8 v16 x vec_b8 +@ vec_b8_load_op v16 xl vec_b8 +@ vec_b8_ptr ptr i vec_b8=* +@ const_vec_b8_ptr ptr i vec_b8=* +@ vec_f32 v16 x vec_f32 +@ vec_f32_load_op v16 xl vec_f32 +@ vec_f32_ptr ptr i vec_f32=* +@ const_vec_f32_ptr ptr i vec_f32=* +@ vec_p16 v16 x vec_p16 +@ vec_p16_load_op v16 xl vec_p16 +@ vec_p16_ptr ptr i vec_p16=* +@ const_vec_p16_ptr ptr i vec_p16=* +@ vec_s16 v16 x vec_s16 +@ vec_s16_load_op v16 xl vec_s16 +@ vec_s16_ptr ptr i vec_s16=* +@ const_vec_s16_ptr ptr i vec_s16=* +@ vec_s32 v16 x vec_s32 +@ vec_s32_load_op v16 xl vec_s32 +@ vec_s32_ptr ptr i vec_s32=* +@ const_vec_s32_ptr ptr i vec_s32=* +@ vec_s8 v16 x vec_s8 +@ vec_s8_load_op v16 xl vec_s8 +@ vec_s8_ptr ptr i vec_s8=* +@ const_vec_s8_ptr ptr i vec_s8=* +@ vec_u16 v16 x vec_u16 +@ vec_u16_load_op v16 xl vec_u16 +@ vec_u16_ptr ptr i vec_u16=* +@ const_vec_u16_ptr ptr i vec_u16=* +@ vec_u32 v16 x vec_u32 +@ vec_u32_load_op v16 xl vec_u32 +@ vec_u32_ptr ptr i vec_u32=* +@ const_vec_u32_ptr ptr i vec_u32=* +@ vec_u8 v16 x vec_u8 +@ vec_u8_load_op v16 xl vec_u8 +@ vec_u8_ptr ptr i vec_u8=* +@ const_vec_u8_ptr ptr i vec_u8=* +@ void_store_op void s +@ volatile_void void v +@ volatile_void_load_op void vl +@ volatile_void_store_op void vs +@ volatile_vec_u16 v16 vx vec_u16 +@ char_ptr ptr i char=* +@ const_char_ptr ptr i char=* +# @ @ instruction type +@ @ @ MOP_mfvscr fxu +@ @ @ MOP_mtvscr fxu +@ @ @ MOP_dss load +@ @ @ MOP_dssall load +@ @ @ MOP_dst load +@ @ @ MOP_dstst load +@ @ @ MOP_dststt load +@ @ @ MOP_dstt load +@ @ @ MOP_lvebx load +@ @ @ MOP_lvehx load +@ @ @ MOP_lvewx load +@ @ @ MOP_lvsl load +@ @ @ MOP_lvsr load +@ @ @ MOP_lvx load +@ @ @ MOP_lvxl load +@ @ @ MOP_stvebx store +@ @ @ MOP_stvehx store +@ @ @ MOP_stvewx store +@ @ @ MOP_stvx store +@ @ @ MOP_stvxl store +@ @ @ MOP_vaddcuw simple +@ @ @ 
MOP_vaddfp fp +@ @ @ MOP_vaddsbs simple +@ @ @ MOP_vaddshs simple +@ @ @ MOP_vaddsws simple +@ @ @ MOP_vaddubm simple +@ @ @ MOP_vaddubs simple +@ @ @ MOP_vadduhm simple +@ @ @ MOP_vadduhs simple +@ @ @ MOP_vadduwm simple +@ @ @ MOP_vadduws simple +@ @ @ MOP_vand simple +@ @ @ MOP_vandc simple +@ @ @ MOP_vavgsb simple +@ @ @ MOP_vavgsh simple +@ @ @ MOP_vavgsw simple +@ @ @ MOP_vavgub simple +@ @ @ MOP_vavguh simple +@ @ @ MOP_vavguw simple +@ @ @ MOP_vcfsx fp +@ @ @ MOP_vcfux fp +@ @ @ MOP_vcmpbfp simple +@ @ @ MOP_vcmpbfpD simple +@ @ @ MOP_vcmpeqfp simple +@ @ @ MOP_vcmpeqfpD simple +@ @ @ MOP_vcmpequb simple +@ @ @ MOP_vcmpequbD simple +@ @ @ MOP_vcmpequh simple +@ @ @ MOP_vcmpequhD simple +@ @ @ MOP_vcmpequw simple +@ @ @ MOP_vcmpequwD simple +@ @ @ MOP_vcmpgefp simple +@ @ @ MOP_vcmpgefpD simple +@ @ @ MOP_vcmpgtfp simple +@ @ @ MOP_vcmpgtfpD simple +@ @ @ MOP_vcmpgtsb simple +@ @ @ MOP_vcmpgtsbD simple +@ @ @ MOP_vcmpgtsh simple +@ @ @ MOP_vcmpgtshD simple +@ @ @ MOP_vcmpgtsw simple +@ @ @ MOP_vcmpgtswD simple +@ @ @ MOP_vcmpgtub simple +@ @ @ MOP_vcmpgtubD simple +@ @ @ MOP_vcmpgtuh simple +@ @ @ MOP_vcmpgtuhD simple +@ @ @ MOP_vcmpgtuw simple +@ @ @ MOP_vcmpgtuwD simple +@ @ @ MOP_vctsxs fp +@ @ @ MOP_vctuxs fp +@ @ @ MOP_vexptefp fp +@ @ @ MOP_vlogefp fp +@ @ @ MOP_vmaddfp fp +@ @ @ MOP_vmaxfp simple +@ @ @ MOP_vmaxsb simple +@ @ @ MOP_vmaxsh simple +@ @ @ MOP_vmaxsw simple +@ @ @ MOP_vmaxub simple +@ @ @ MOP_vmaxuh simple +@ @ @ MOP_vmaxuw simple +@ @ @ MOP_vmhaddshs complex +@ @ @ MOP_vmhraddshs complex +@ @ @ MOP_vminfp simple +@ @ @ MOP_vminsb simple +@ @ @ MOP_vminsh simple +@ @ @ MOP_vminsw simple +@ @ @ MOP_vminub simple +@ @ @ MOP_vminuh simple +@ @ @ MOP_vminuw simple +@ @ @ MOP_vmladduhm complex +@ @ @ MOP_vmrghb perm +@ @ @ MOP_vmrghh perm +@ @ @ MOP_vmrghw perm +@ @ @ MOP_vmrglb perm +@ @ @ MOP_vmrglh perm +@ @ @ MOP_vmrglw perm +@ @ @ MOP_vmsummbm complex +@ @ @ MOP_vmsumshm complex +@ @ @ MOP_vmsumshs complex +@ @ @ MOP_vmsumubm complex +@ @ 
@ MOP_vmsumuhm complex +@ @ @ MOP_vmsumuhs complex +@ @ @ MOP_vmulesb complex +@ @ @ MOP_vmulesh complex +@ @ @ MOP_vmuleub complex +@ @ @ MOP_vmuleuh complex +@ @ @ MOP_vmulosb complex +@ @ @ MOP_vmulosh complex +@ @ @ MOP_vmuloub complex +@ @ @ MOP_vmulouh complex +@ @ @ MOP_vnmsubfp fp +@ @ @ MOP_vnor simple +@ @ @ MOP_vor simple +@ @ @ MOP_vperm perm +@ @ @ MOP_vpkpx perm +@ @ @ MOP_vpkshss perm +@ @ @ MOP_vpkshus perm +@ @ @ MOP_vpkswss perm +@ @ @ MOP_vpkswus perm +@ @ @ MOP_vpkuhum perm +@ @ @ MOP_vpkuhus perm +@ @ @ MOP_vpkuwum perm +@ @ @ MOP_vpkuwus perm +@ @ @ MOP_vrefp fp +@ @ @ MOP_vrfim fp +@ @ @ MOP_vrfin fp +@ @ @ MOP_vrfip fp +@ @ @ MOP_vrfiz fp +@ @ @ MOP_vrlb simple +@ @ @ MOP_vrlh simple +@ @ @ MOP_vrlw simple +@ @ @ MOP_vrsqrtefp fp +@ @ @ MOP_vsel simple +@ @ @ MOP_vsl simple +@ @ @ MOP_vslb simple +@ @ @ MOP_vsldoi perm +@ @ @ MOP_vslh simple +@ @ @ MOP_vslo perm_bug +@ @ @ MOP_vslw simple +@ @ @ MOP_vspltb perm +@ @ @ MOP_vsplth perm +@ @ @ MOP_vspltisb perm +@ @ @ MOP_vspltish perm +@ @ @ MOP_vspltisw perm +@ @ @ MOP_vspltw perm +@ @ @ MOP_vsr simple +@ @ @ MOP_vsrab simple +@ @ @ MOP_vsrah simple +@ @ @ MOP_vsraw simple +@ @ @ MOP_vsrb simple +@ @ @ MOP_vsrh simple +@ @ @ MOP_vsro perm_bug +@ @ @ MOP_vsrw simple +@ @ @ MOP_vsubcuw simple +@ @ @ MOP_vsubfp fp +@ @ @ MOP_vsubsbs simple +@ @ @ MOP_vsubshs simple +@ @ @ MOP_vsubsws simple +@ @ @ MOP_vsububm simple +@ @ @ MOP_vsububs simple +@ @ @ MOP_vsubuhm simple +@ @ @ MOP_vsubuhs simple +@ @ @ MOP_vsubuwm simple +@ @ @ MOP_vsubuws simple +@ @ @ MOP_vsum2sws complex +@ @ @ MOP_vsum4sbs complex +@ @ @ MOP_vsum4shs complex +@ @ @ MOP_vsum4ubs complex +@ @ @ MOP_vsumsws complex +@ @ @ MOP_vupkhpx perm +@ @ @ MOP_vupkhsb perm +@ @ @ MOP_vupkhsh perm +@ @ @ MOP_vupklpx perm +@ @ @ MOP_vupklsb perm +@ @ @ MOP_vupklsh perm +@ @ @ MOP_vxor simple +# The vec_abs and vec_abss operations identify their variants with insn_name. +# Map these into a valid insn code (xfx_perm). 
+@ @ @ 1 perm +@ @ @ 2 perm +@ @ @ 3 perm +@ @ @ 4 perm +@ @ @ 5 perm +@ @ @ 6 perm +@ @ @ 7 perm diff --git a/gcc/config/rs6000/darwin-fpsave.asm b/gcc/config/rs6000/darwin-fpsave.asm new file mode 100644 index 00000000000..d25a1141c45 --- /dev/null +++ b/gcc/config/rs6000/darwin-fpsave.asm @@ -0,0 +1,69 @@ +/* APPLE LOCAL file performance improvement */ +/* This file contains the floating-point save and restore routines. + + THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE + ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31, + for example. For FP reg saves/restores, it takes one instruction + (4 bytes) to do the operation; for Vector regs, 2 instructions are + required (8 bytes.) + + MORAL: DO NOT MESS AROUND WITH THESE FUNCTIONS! */ + +.text + .align 2 + +/* saveFP saves R0 -- assumed to be the callers LR -- to 8(R1). */ + +.private_extern saveFP +saveFP: + stfd f14,-144(r1) + stfd f15,-136(r1) + stfd f16,-128(r1) + stfd f17,-120(r1) + stfd f18,-112(r1) + stfd f19,-104(r1) + stfd f20,-96(r1) + stfd f21,-88(r1) + stfd f22,-80(r1) + stfd f23,-72(r1) + stfd f24,-64(r1) + stfd f25,-56(r1) + stfd f26,-48(r1) + stfd f27,-40(r1) + stfd f28,-32(r1) + stfd f29,-24(r1) + stfd f30,-16(r1) + stfd f31,-8(r1) + stw r0,8(r1) + blr + +/* restFP restores the caller`s LR from 8(R1). Note that the code for + this starts at the offset of F30 restoration, so calling this + routine in an attempt to restore only F31 WILL NOT WORK (it would + be a stupid thing to do, anyway.) 
*/ + +.private_extern restFP +restFP: + lfd f14,-144(r1) + lfd f15,-136(r1) + lfd f16,-128(r1) + lfd f17,-120(r1) + lfd f18,-112(r1) + lfd f19,-104(r1) + lfd f20,-96(r1) + lfd f21,-88(r1) + lfd f22,-80(r1) + lfd f23,-72(r1) + lfd f24,-64(r1) + lfd f25,-56(r1) + lfd f26,-48(r1) + lfd f27,-40(r1) + lfd f28,-32(r1) + lfd f29,-24(r1) + /* <OFFSET OF F30 RESTORE> restore callers LR */ + lwz r0,8(r1) + lfd f30,-16(r1) + /* and prepare for return to caller */ + mtlr r0 + lfd f31,-8(r1) + blr diff --git a/gcc/config/rs6000/darwin-vecsave.asm b/gcc/config/rs6000/darwin-vecsave.asm new file mode 100644 index 00000000000..8c5352ee709 --- /dev/null +++ b/gcc/config/rs6000/darwin-vecsave.asm @@ -0,0 +1,133 @@ +/* APPLE LOCAL file AltiVec */ +/* Vector save/restore routines for Darwin. Note that each vector + save/restore requires 2 instructions (8 bytes.) + + THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE + ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31, + for example. For FP reg saves/restores, it takes one instruction + (4 bytes) to do the operation; for Vector regs, 2 instructions are + required (8 bytes.) + + MORAL: DO NOT MESS AROUND WITH THESE FUNCTIONS! 
*/ + +.text + .align 2 + +.private_extern saveVEC +saveVEC: + li r11,-192 + stvx v20,r11,r0 + li r11,-176 + stvx v21,r11,r0 + li r11,-160 + stvx v22,r11,r0 + li r11,-144 + stvx v23,r11,r0 + li r11,-128 + stvx v24,r11,r0 + li r11,-112 + stvx v25,r11,r0 + li r11,-96 + stvx v26,r11,r0 + li r11,-80 + stvx v27,r11,r0 + li r11,-64 + stvx v28,r11,r0 + li r11,-48 + stvx v29,r11,r0 + li r11,-32 + stvx v30,r11,r0 + li r11,-16 + stvx v31,r11,r0 + blr + +.private_extern restVEC +restVEC: + li r11,-192 + lvx v20,r11,r0 + li r11,-176 + lvx v21,r11,r0 + li r11,-160 + lvx v22,r11,r0 + li r11,-144 + lvx v23,r11,r0 + li r11,-128 + lvx v24,r11,r0 + li r11,-112 + lvx v25,r11,r0 + li r11,-96 + lvx v26,r11,r0 + li r11,-80 + lvx v27,r11,r0 + li r11,-64 + lvx v28,r11,r0 + li r11,-48 + lvx v29,r11,r0 + li r11,-32 + lvx v30,r11,r0 + li r11,-16 + lvx v31,r11,r0 + blr + +/* saveVEC_vr11 -- as saveVEC but VRsave is returned in R11. */ + +.private_extern saveVEC_vr11 +saveVEC_vr11: + li r11,-192 + stvx v20,r11,r0 + li r11,-176 + stvx v21,r11,r0 + li r11,-160 + stvx v22,r11,r0 + li r11,-144 + stvx v23,r11,r0 + li r11,-128 + stvx v24,r11,r0 + li r11,-112 + stvx v25,r11,r0 + li r11,-96 + stvx v26,r11,r0 + li r11,-80 + stvx v27,r11,r0 + li r11,-64 + stvx v28,r11,r0 + li r11,-48 + stvx v29,r11,r0 + li r11,-32 + stvx v30,r11,r0 + li r11,-16 + stvx v31,r11,r0 + mfspr r11,VRsave + blr + +/* As restVec, but the original VRsave value passed in R10. */ + +.private_extern restVEC_vr10 +restVEC_vr10: + li r11,-192 + lvx v20,r11,r0 + li r11,-176 + lvx v21,r11,r0 + li r11,-160 + lvx v22,r11,r0 + li r11,-144 + lvx v23,r11,r0 + li r11,-128 + lvx v24,r11,r0 + li r11,-112 + lvx v25,r11,r0 + li r11,-96 + lvx v26,r11,r0 + li r11,-80 + lvx v27,r11,r0 + li r11,-64 + lvx v28,r11,r0 + li r11,-48 + lvx v29,r11,r0 + li r11,-32 + lvx v30,r11,r0 + li r11,-16 + lvx v31,r11,r0 + /* restore VRsave from R10. 
*/ + mtspr VRsave,r10 + blr diff --git a/gcc/config/rs6000/darwin-worldsave.asm b/gcc/config/rs6000/darwin-worldsave.asm new file mode 100644 index 00000000000..2d43f61adde --- /dev/null +++ b/gcc/config/rs6000/darwin-worldsave.asm @@ -0,0 +1,233 @@ +/* APPLE LOCAL file world save/restore */ +/* This file contains the exception-handling save_world and + restore_world routines, which need to do a run-time check to see if + they should save and restore the vector regs. */ + +.data + .align 2 + +#ifdef __DYNAMIC__ + +.non_lazy_symbol_pointer +L_has_vec$non_lazy_ptr: + .indirect_symbol __cpu_has_altivec + .long 0 + +#else + +/* For static, "pretend" we have a non-lazy-pointer. */ + +L_has_vec$non_lazy_ptr: + .long __cpu_has_altivec + +#endif + + +.text + .align 2 + +/* save_world and rest_world save/restore F14-F31 and possibly V20-V31 + (assuming you have a CPU with vector registers; we use a global var + provided by the System Framework to determine this.) + + SAVE_WORLD takes R0 (the caller`s caller`s return address) and R11 + (the stack frame size) as parameters. It returns VRsave in R0 if + we`re on a CPU with vector regs. + + With gcc3, we now need to save and restore CR as well, since gcc3's + scheduled prologs can cause comparisons to be moved before calls to + save_world! 
+ + USES: R0 R11 R12 */ + +.private_extern save_world +save_world: + stw r0,8(r1) + mflr r0 + bcl 20,31,Ls$pb +Ls$pb: mflr r12 + addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Ls$pb) + lwz r12,lo16(L_has_vec$non_lazy_ptr-Ls$pb)(r12) + mtlr r0 + lwz r12,0(r12) + /* grab CR */ + mfcr r0 + /* test HAS_VEC */ + cmpwi r12,0 + stfd f14,-144(r1) + stfd f15,-136(r1) + stfd f16,-128(r1) + stfd f17,-120(r1) + stfd f18,-112(r1) + stfd f19,-104(r1) + stfd f20,-96(r1) + stfd f21,-88(r1) + stfd f22,-80(r1) + stfd f23,-72(r1) + stfd f24,-64(r1) + stfd f25,-56(r1) + stfd f26,-48(r1) + stfd f27,-40(r1) + stfd f28,-32(r1) + stfd f29,-24(r1) + stfd f30,-16(r1) + stfd f31,-8(r1) + stmw r13,-220(r1) + /* stash CR */ + stw r0,4(r1) + /* set R12 pointing at Vector Reg save area */ + addi r12,r1,-224 + /* allocate stack frame */ + stwux r1,r1,r11 + /* ...but return if HAS_VEC is zero */ + bne+ L$saveVMX + /* Not forgetting to restore CR. */ + mtcr r0 + blr + +L$saveVMX: + /* We're saving Vector regs too. */ + /* Restore CR from R0. No More Branches! */ + mtcr r0 + + /* We should really use VRSAVE to figure out which vector regs + we actually need to save and restore. Some other time :-/ */ + + li r11,-192 + stvx v20,r11,r12 + li r11,-176 + stvx v21,r11,r12 + li r11,-160 + stvx v22,r11,r12 + li r11,-144 + stvx v23,r11,r12 + li r11,-128 + stvx v24,r11,r12 + li r11,-112 + stvx v25,r11,r12 + li r11,-96 + stvx v26,r11,r12 + li r11,-80 + stvx v27,r11,r12 + li r11,-64 + stvx v28,r11,r12 + li r11,-48 + stvx v29,r11,r12 + li r11,-32 + stvx v30,r11,r12 + mfspr r0,VRsave + li r11,-16 + stvx v31,r11,r12 + /* VRsave lives at -224(R1) */ + stw r0,0(r12) + blr + + +/* eh_rest_world_r10 is jumped to, not called, so no need to worry about LR. + R10 is the C++ EH stack adjust parameter, we return to the caller`s caller. + + USES: R0 R10 R11 R12 and R7 R8 + RETURNS: C++ EH Data registers (R3 - R6.) + + We now set up R7/R8 and jump to rest_world_eh_r7r8. 
+ + rest_world doesn't use the R10 stack adjust parameter, nor does it + pick up the R3-R6 exception handling stuff. */ + +.private_extern rest_world +rest_world: + /* Pickup previous SP */ + lwz r11, 0(r1) + li r7, 0 + lwz r8, 8(r11) + li r10, 0 + b rest_world_eh_r7r8 + +.private_extern eh_rest_world_r10 +eh_rest_world_r10: + /* Pickup previous SP */ + lwz r11, 0(r1) + mr r7,r10 + lwz r8, 8(r11) + /* pickup the C++ EH data regs (R3 - R6.) */ + lwz r6,-420(r11) + lwz r5,-424(r11) + lwz r4,-428(r11) + lwz r3,-432(r11) + + b rest_world_eh_r7r8 + +/* rest_world_eh_r7r8 is jumped to -- not called! -- when we're doing + the exception-handling epilog. R7 contains the offset to add to + the SP, and R8 contains the 'real' return address. + + USES: R0 R11 R12 [R7/R8] + RETURNS: C++ EH Data registers (R3 - R6.) */ + +rest_world_eh_r7r8: + bcl 20,31,Lr7r8$pb +Lr7r8$pb: mflr r12 + lwz r11,0(r1) + /* R11 := previous SP */ + addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Lr7r8$pb) + lwz r12,lo16(L_has_vec$non_lazy_ptr-Lr7r8$pb)(r12) + lwz r0,4(r11) + /* R0 := old CR */ + lwz r12,0(r12) + /* R12 := HAS_VEC */ + mtcr r0 + cmpwi r12,0 + lmw r13,-220(r11) + beq L.rest_world_fp_eh + /* restore VRsave and V20..V31 */ + lwz r0,-224(r11) + li r12,-416 + mtspr VRsave,r0 + lvx v20,r11,r12 + li r12,-400 + lvx v21,r11,r12 + li r12,-384 + lvx v22,r11,r12 + li r12,-368 + lvx v23,r11,r12 + li r12,-352 + lvx v24,r11,r12 + li r12,-336 + lvx v25,r11,r12 + li r12,-320 + lvx v26,r11,r12 + li r12,-304 + lvx v27,r11,r12 + li r12,-288 + lvx v28,r11,r12 + li r12,-272 + lvx v29,r11,r12 + li r12,-256 + lvx v30,r11,r12 + li r12,-240 + lvx v31,r11,r12 + +L.rest_world_fp_eh: + lfd f14,-144(r11) + lfd f15,-136(r11) + lfd f16,-128(r11) + lfd f17,-120(r11) + lfd f18,-112(r11) + lfd f19,-104(r11) + lfd f20,-96(r11) + lfd f21,-88(r11) + lfd f22,-80(r11) + lfd f23,-72(r11) + lfd f24,-64(r11) + lfd f25,-56(r11) + lfd f26,-48(r11) + lfd f27,-40(r11) + lfd f28,-32(r11) + lfd f29,-24(r11) + lfd f30,-16(r11) + /* R8 is the 
exception-handler's address */ + mtctr r8 + lfd f31,-8(r11) + /* set SP to original value + R7 offset */ + add r1,r11,r7 + bctr diff --git a/gcc/config/rs6000/darwin.h b/gcc/config/rs6000/darwin.h index 6f193f739c3..5f6f353213f 100644 --- a/gcc/config/rs6000/darwin.h +++ b/gcc/config/rs6000/darwin.h @@ -96,21 +96,51 @@ do { \ #define CC1_SPEC "\ %{gused: -feliminate-unused-debug-symbols %<gused }\ %{static: %{Zdynamic: %e conflicting code gen style switches are used}}\ -%{!static:%{!mdynamic-no-pic:-fPIC}}" +%{!static:%{!fast:%{!fastf:%{!fastcp:%{!mdynamic-no-pic:-fPIC}}}}}" + +/* APPLE LOCAL begin 3492132 */ /* It's virtually impossible to predict all the possible combinations of -mcpu and -maltivec and whatnot, so just supply -force_cpusubtype_ALL if any are seen. Radar 3492132 against the assembler is asking for a .machine directive so we could get this really right. */ -#define ASM_SPEC "-arch ppc \ +#define ASM_SPEC " %(darwin_arch_asm_spec)\ %{Zforce_cpusubtype_ALL:-force_cpusubtype_ALL} \ - %{!Zforce_cpusubtype_ALL:%{maltivec|mcpu=*|mpowerpc64:-force_cpusubtype_ALL}}" + %{!Zforce_cpusubtype_ALL:%{maltivec|faltivec:-force_cpusubtype_ALL}}" + +#define DARWIN_ARCH_LD_SPEC \ +"%{mcpu=601: %{!Zdynamiclib:-arch ppc601} %{Zdynamiclib:-arch_only ppc601}} \ + %{mcpu=603: %{!Zdynamiclib:-arch ppc603} %{Zdynamiclib:-arch_only ppc603}} \ + %{mcpu=604: %{!Zdynamiclib:-arch ppc604} %{Zdynamiclib:-arch_only ppc604}} \ + %{mcpu=604e: %{!Zdynamiclib:-arch ppc604e} %{Zdynamiclib:-arch_only ppc604}} \ + %{mcpu=750: %{!Zdynamiclib:-arch ppc750} %{Zdynamiclib:-arch_only ppc750}} \ + %{mcpu=7400: %{!Zdynamiclib:-arch ppc7400} %{Zdynamiclib:-arch_only ppc7400}} \ + %{mcpu=7450: %{!Zdynamiclib:-arch ppc7450} %{Zdynamiclib:-arch_only ppc7450}} \ + %{mcpu=970: %{!Zdynamiclib:-arch ppc970} %{Zdynamiclib:-arch_only ppc970}} \ + %{mcpu=G5: %{!Zdynamiclib:-arch ppc970} %{Zdynamiclib:-arch_only ppc970}} \ + %{!mcpu*:%{!march*:%{!Zdynamiclib:-arch ppc} %{Zdynamiclib:-arch_only ppc}}} " + 
+#define DARWIN_ARCH_ASM_SPEC \ +"%{mcpu=601: -arch ppc601} \ + %{mcpu=603: -arch ppc603} \ + %{mcpu=604: -arch ppc604} \ + %{mcpu=604e: -arch ppc604e} \ + %{mcpu=750: -arch ppc750} \ + %{mcpu=7400: -arch ppc7400} \ + %{mcpu=7450: -arch ppc7450} \ + %{mcpu=970: -arch ppc970} \ + %{mcpu=G5: -arch ppc970} \ + %{!mcpu*:%{!march*: -arch ppc}} " #undef SUBTARGET_EXTRA_SPECS #define SUBTARGET_EXTRA_SPECS \ + { "darwin_arch_asm_spec", DARWIN_ARCH_ASM_SPEC }, \ + { "darwin_arch_ld_spec", DARWIN_ARCH_LD_SPEC }, \ { "darwin_arch", "ppc" }, +/* APPLE LOCAL end 3492132 */ + /* The "-faltivec" option should have been called "-maltivec" all along. */ #define SUBTARGET_OPTION_TRANSLATE_TABLE \ { "-faltivec", "-maltivec -include altivec.h" }, \ @@ -130,6 +160,24 @@ do { \ #undef RS6000_PIC_OFFSET_TABLE_REGNUM #define RS6000_PIC_OFFSET_TABLE_REGNUM 31 +/* APPLE LOCAL begin -pg fix */ +/* -pg has a problem which is normally concealed by -fPIC; + either -mdynamic-no-pic or -static exposes the -pg problem, causing the + crash. FSF gcc for Darwin also has this bug. The problem is that -pg + causes several int registers to be saved and restored although they may + not actually be used (config/rs6000/rs6000.c:first_reg_to_save()). In the + rare case where none of them is actually used, a consistency check fails + (correctly). This cannot happen with -fPIC because the PIC register (R31) + is always "used" in the sense checked by the consistency check. The + easy fix, here, is therefore to mark R31 always "used" whenever -pg is on. + A better, but harder, fix would be to improve -pg's register-use + logic along the lines suggested by comments in the function listed above. */ +#undef PIC_OFFSET_TABLE_REGNUM +#define PIC_OFFSET_TABLE_REGNUM ((flag_pic || profile_flag) \ + ? RS6000_PIC_OFFSET_TABLE_REGNUM \ + : INVALID_REGNUM) +/* APPLE LOCAL end -pg fix */ + /* Pad the outgoing args area to 16 bytes instead of the usual 8. 
*/ #undef STARTING_FRAME_OFFSET @@ -145,14 +193,30 @@ do { \ /* These are used by -fbranch-probabilities */ #define HOT_TEXT_SECTION_NAME "__TEXT,__text,regular,pure_instructions" +/* APPLE LOCAL begin hot/cold partitioning */ #define UNLIKELY_EXECUTED_TEXT_SECTION_NAME \ - "__TEXT,__text2,regular,pure_instructions" + "__TEXT,__unlikely,regular,pure_instructions" +/* APPLE LOCAL end hot/cold partitioning */ +/* APPLE LOCAL begin long branch */ /* Define cutoff for using external functions to save floating point. - Currently on Darwin, always use inline stores. */ + For Darwin, use the function for more than a few registers. */ + +/* APPLE LOCAL begin 3414605 */ +#undef FP_SAVE_INLINE +#define FP_SAVE_INLINE(FIRST_REG) \ +(optimize >= 3 \ +|| ((FIRST_REG) > 60 && (FIRST_REG) < 64) \ +|| TARGET_LONG_BRANCH) +/* APPLE LOCAL end 3414605 */ -#undef FP_SAVE_INLINE -#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 64) +/* Define cutoff for using external functions to save vector registers. */ + +#undef VECTOR_SAVE_INLINE +#define VECTOR_SAVE_INLINE(FIRST_REG) \ + (((FIRST_REG) >= LAST_ALTIVEC_REGNO - 1 && (FIRST_REG) <= LAST_ALTIVEC_REGNO) \ + || TARGET_LONG_BRANCH) +/* APPLE LOCAL end long branch */ /* The assembler wants the alternate register names, but without leading percent sign. */ @@ -212,11 +276,7 @@ do { \ #undef ASM_COMMENT_START #define ASM_COMMENT_START ";" -/* FP save and restore routines. */ -#define SAVE_FP_PREFIX "._savef" -#define SAVE_FP_SUFFIX "" -#define RESTORE_FP_PREFIX "._restf" -#define RESTORE_FP_SUFFIX "" +/* APPLE LOCAL don't define SAVE_FP_PREFIX and friends */ /* This is how to output an assembler line that says to advance the location counter to a multiple of 2**LOG bytes using the @@ -288,38 +348,64 @@ do { \ ? GENERAL_REGS \ : (CLASS)) -/* Fix for emit_group_load (): force large constants to be pushed via regs. 
*/ -#define ALWAYS_PUSH_CONSTS_USING_REGS_P 1 - -/* This now supports a natural alignment mode */ -/* Darwin word-aligns FP doubles but doubleword-aligns 64-bit ints. */ -#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \ - (TARGET_ALIGN_NATURAL ? (COMPUTED) : \ - (TYPE_MODE (TREE_CODE (TREE_TYPE (FIELD)) == ARRAY_TYPE \ - ? get_inner_array_type (FIELD) \ - : TREE_TYPE (FIELD)) == DFmode \ - ? MIN ((COMPUTED), 32) : (COMPUTED))) - -/* Darwin increases natural record alignment to doubleword if the first - field is an FP double while the FP fields remain word aligned. */ -#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \ - ((TREE_CODE (STRUCT) == RECORD_TYPE \ - || TREE_CODE (STRUCT) == UNION_TYPE \ - || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \ - && TARGET_ALIGN_NATURAL == 0 \ - ? rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \ - : (TARGET_ALTIVEC && TREE_CODE (STRUCT) == VECTOR_TYPE) \ - ? MAX (MAX ((COMPUTED), (SPECIFIED)), 128) \ - : MAX ((COMPUTED), (SPECIFIED))) +/* APPLE LOCAL begin Macintosh alignment 2002-2-26 ff */ +/* This now supports the Macintosh power, mac68k, and natural + alignment modes. It now has one more parameter than the standard + version of the ADJUST_FIELD_ALIGN macro. + + The macro works as follows: We use the computed alignment of the + field if we are in the natural alignment mode or if the field is + a vector. Otherwise, if we are in the mac68k alignment mode, we + use the minimum of the computed alignment and 16 (pegging at + 2-byte alignment). If we are in the power mode, we peg at 32 + (word alignment) unless it is the first field of the struct, in + which case we use the computed alignment. */ +#undef ADJUST_FIELD_ALIGN +#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED, FIRST_FIELD_P) \ + (TARGET_ALIGN_NATURAL ? (COMPUTED) : \ + (((COMPUTED) == RS6000_VECTOR_ALIGNMENT) \ + ? RS6000_VECTOR_ALIGNMENT \ + : (MIN ((COMPUTED), \ + (TARGET_ALIGN_MAC68K ? 16 \ + : ((FIRST_FIELD_P) ? 
(COMPUTED) \ + : 32)))))) + +#undef ROUND_TYPE_ALIGN +/* Macintosh alignment modes require more complicated handling + of alignment, so we replace the macro with a call to a + out-of-line function. */ +union tree_node; +extern unsigned round_type_align (union tree_node*, unsigned, unsigned); /* rs6000.c */ +#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \ + round_type_align(STRUCT, COMPUTED, SPECIFIED) +/* APPLE LOCAL end Macintosh alignment 2002-2-26 ff */ + +/* APPLE LOCAL begin alignment */ +/* Make sure local alignments come from the type node, not the mode; + mode-based alignments are wrong for vectors. */ +#undef LOCAL_ALIGNMENT +#define LOCAL_ALIGNMENT(TYPE, ALIGN) (MAX ((unsigned) ALIGN, \ + TYPE_ALIGN (TYPE))) +/* APPLE LOCAL end alignment */ /* XXX: Darwin supports neither .quad, or .llong, but it also doesn't support 64 bit PowerPC either, so this just keeps things happy. */ #define DOUBLE_INT_ASM_OP "\t.quad\t" +/* APPLE LOCAL begin branch cost */ +#undef BRANCH_COST +/* Better code is generated by saying conditional branches take 1 tick. */ +#define BRANCH_COST 1 +/* APPLE LOCAL end branch cost */ + +/* APPLE LOCAL indirect calls in R12 */ +/* Address of indirect call must be computed here */ +#define MAGIC_INDIRECT_CALL_REG 12 + /* For binary compatibility with 2.95; Darwin C APIs use bool from stdbool.h, which was an int-sized enum in 2.95. */ #define BOOL_TYPE_SIZE INT_TYPE_SIZE -#undef REGISTER_TARGET_PRAGMAS -#define REGISTER_TARGET_PRAGMAS DARWIN_REGISTER_TARGET_PRAGMAS +/* APPLE LOCAL OS pragma hook */ +/* Register generic Darwin pragmas as "OS" pragmas. 
*/ diff --git a/gcc/config/rs6000/ops-to-gp b/gcc/config/rs6000/ops-to-gp new file mode 100755 index 00000000000..becb406749b --- /dev/null +++ b/gcc/config/rs6000/ops-to-gp @@ -0,0 +1,620 @@ +#!/bin/sh +# APPLE LOCAL file AltiVec +# ops-to-gp -gcc vec.ops builtin.ops +# Creates vec.h used by rs6000.c + +arg0=`basename $0` +err() { + echo "$arg0: $*" 1>&2 + exit 2 +} + +if [ $# -eq 0 ] ; then + echo "Usage: $arg0 [ -mcc | -gcc ] builtin-ops ..." 1>&2 + exit 1 +fi + +MCC=1 +GCC=0 +suffix="gp" +if [ "$1" = "-mcc" ] ; then + shift; +elif [ "$1" = "-gcc" ] ; then + GCC=1 + MCC=0 + suffix="h" + shift; +fi + +output=`basename $1 .ops` +gperf="gperf -G -a -o -k1-15 -p -t -D -T -N Is_Builtin_Function $output.gp"; + +# Lines in the ops file have the form +# @ @ betype betype-code type-spelling +# @ fetype betype [code] +# @ @ @ instruction type +# generic op1 op2 ... opn = result specific when configure [addressible +# [instruction [const_ptr_ok [volatile_ptr_ok [transform [predicate]]]]]] + +# Sort the ops file to put it in a canonical order. +sort -u $* | \ + +# Add specific function uid's, make generic functions from specific +# functions, validate the types used, compute default parameters, and +# compute parts of the default transform and predicate functions. 
+awk 'BEGIN { + i = 0 + EQ = i++ + RESULT = i++ + SPECIFIC = i++ + WHEN = i++ + CONFIGURED = i++ + ADDRESSIBLE = i++ + INSTRUCTION = i++ + CONST_PTR_OK = i++ + VOLATILE_PTR_OK = i++ + TRANSFORM = i++ + PREDICATE = i++ + n_lines = 1; + tree[3] = "Make_Folded_4tree"; + tree[2] = "Make_Folded_3tree"; + tree[1] = "Make_Folded_Btree"; + tree[0] = "Make_Utree"; + optimize["vec_sub"] = 1; + optimize["vec_subs"] = 1; + optimize["vec_xor"] = 1; + optimize["vec_andc"] = 1; + optimize["vec_avg"] = 2; + optimize["vec_or"] = 2; + optimize["vec_and"] = 2; + optimize["vec_max"] = 2; + optimize["vec_min"] = 2; + optimize["vec_sld"] = 3; + optimize["vec_splat_s8"] = 4; + optimize["vec_splat_s16"] = 5; + optimize["vec_splat_s32"] = 6; + optimize["vec_splat_u8"] = 4; + optimize["vec_splat_u16"] = 5; + optimize["vec_splat_u32"] = 6; + optimize["vec_cmpeq"] = 7; + optimize["vec_lvsl"] = 8; + optimize["vec_lvsr"] = 9; + # These operations need additional transformation. Key off the + # optimize attribute to identify them. + optimize["vec_cmplt"] = 10; + optimize["vec_cmple"] = 10; + optimize["vec_abs"] = 11; + optimize["vec_abss"] = 11; + } + function no_type(t) { + printf "%% Error: type %s not declared.\n", t; + status = 1; + exit; + } + # Record the type. + $1 == "@" { + if ($2 == "@") { + if ($3 == "@") { + # Definition of an instruction. + insn_type[$4] = $5; # type + } else { + # Definition of a betype. + becode[$3] = $4; # betype-code + bespell[$3] = $5; # type-spelling + gsub(/\=/, " ", bespell[$3]); + } + } else { + # Definition of a fetype. + print $0; + if (!becode[$3]) no_type($3); # Must have defined the betype. + betype[$2] = $3; # betype; + if (NF == 3) + code[$2] = ""; + else + code[$2] = $4; # code + } + } + function no_equal(i,l) { + printf "%% Syntax error %d: %s\n", i, l; + status = 1; + exit; + } + function error(f,a) { + printf( ("%% error: " f), a); + status = 1; + exit; + } + # Ignore comment lines. 
+ $1 != "#" && $1 != "@" { + # Generate the signature of the specific function, the predicate, + # the transform, the arguments to the transform function, the + # arguments to the predicate function, and the spelling of the + # function type. + signature = ""; + predicate = ""; + transform = ""; + insn_code = ""; + transform_args = ""; + predicate_args = ""; + function_type = ""; + # First, consider the parameter types. + for (i = 2; $i != "=" && i < NF; i++) { + if ($i != "...") { + if (!betype[$i]) no_type($i); + signature = (signature " " $i); + predicate = (predicate "_" betype[$i]); + transform = (transform code[$i]); + transform_args = (transform_args ", ND_kid(t," i-1 ")"); + predicate_args = (predicate_args " " becode[betype[$i]]); + if (function_type) + function_type = (function_type ", " bespell[betype[$i]]); + else + function_type = bespell[betype[$i]]; + } + } + constraints = (transform "@"); + # Check the syntax of the ops file. + if ($i != "=" || NF > i+PREDICATE || NF < i+CONFIGURE) no_equal(i,$0); + if (!betype[$(i+RESULT)]) no_type($(i+RESULT)); + # Incorporate the result type. + if (i == 2) { + predicate = "_void"; + function_type = "void"; + } + signature = ($(i+SPECIFIC) signature); + predicate = sprintf("is_%s_func%s", betype[$(i+RESULT)], predicate); + predicate_args = (becode[betype[$(i+RESULT)]] predicate_args); + function_type = sprintf("(%s (*)(%s))", bespell[betype[$(i+RESULT)]], \ + function_type); + if (substr(code[$(i+RESULT)], 1, 1) == "j") { + # Handle a jump asm. The code is expedted to be + # j={cc-bit-num}={cc-bit-value}[={r|d}]. The operation must have + # one operand if the code d is used and two operands otherwise. + # The transform function can implement the r code by reversing the + # two operands. In all cases, the first operand is a computed + # constant encoding both the bit number and the test. 
+ n = split(code[$(i+RESULT)], jmp, "="); + if (jmp[n] == "d" && i != 3) error("%d operands", i-2); + if (jmp[n] != "d" && i != 4) error("%d operands", i-2); + if (jmp[n] == "r") + transform_args = ", ND_kid(t,2), ND_kid(t,1)"; + transform_args = sprintf("%s(OP_VCMP%s%s", tree[i-2], \ + toupper(jmp[3]), transform_args); + if (jmp[n] == "r") + transform = ("r" transform); + insn_code = sprintf("CODE_FOR_j_%d_%s_f%s", jmp[2], jmp[3], \ + transform); + transform = sprintf("transform_j_%d_%s_f%s", jmp[2], jmp[3], \ + transform); + } else { + transform_args = sprintf("%s(OP_%sASM%s%s", tree[i-2], \ + toupper(code[$(i+RESULT)]), \ + toupper(transform), transform_args); + insn_code = sprintf("CODE_FOR_%sf%s", code[$(i+RESULT)], transform); + transform = sprintf("transform_%sf%s", code[$(i+RESULT)], transform); + } + # Give a unique id to the signature + if (count[signature] == 0) + count[signature] = ++uid[$(i+SPECIFIC)]; + + # Compute the default instruction name + nf = split($(i+SPECIFIC), part, "_"); + instruction = ("MOP_" part[nf]); + + # Compute the insn_code, but use the instruction override if given. + if (NF >= i+INSTRUCTION) + instruction = $(i+INSTRUCTION); + if (insn_type[instruction]) + insn_code = (insn_code "_" insn_type[instruction]); + + # Allow the user to override the addressibility, instruction, + # const_ptr_ok, volatile_ptr_ok, transform, and predicate. 
+ if (NF >= i+ADDRESSIBLE) + addressible = ""; + else + addressible = "FALSE"; + + if (NF >= i+INSTRUCTION) + instruction = ""; + else if (substr($1, 1, 4) == "vec_") + print "@ @3", instruction; + + if (NF >= i+CONST_PTR_OK) + const_ptr_ok = ""; + else + const_ptr_ok = "FALSE"; + + if (NF >= i+VOLATILE_PTR_OK) + volatile_ptr_ok = ""; + else + volatile_ptr_ok = "FALSE"; + + if (NF >= i+TRANSFORM) + transform = ""; + else + print "@ @1", transform, transform_args; + + if (NF >= i+PREDICATE) + predicate = ""; + else + print "@ @2", i-2, predicate, predicate_args, function_type; + + if (optimize[$1]) + optimize_method = optimize[$1]; + else + optimize_method = "0"; + + # Record the line, addressibility, instruction, transform, + # predicate, and unique id. + line[n_lines++] = ($0 " " addressible " " instruction " " \ + const_ptr_ok " " volatile_ptr_ok " " transform " " \ + predicate " " insn_code " " constraints " " \ + optimize_method " " count[signature]); + } + END { + if (status) exit; + # generic op1 op2 ... opn = result specific when configured + # addressable instruction const_ptr_ok volatile_ptr_ok + # transform predicate insn_code constraints optimize uid + SPECIFIC = 12 + for (i = 1; i < n_lines; i++) { + nf = split(line[i], part); + specific = part[nf-SPECIFIC]; + + # Print the generic form. + printf "%s", part[1]; + for (j = 2; j <= nf-SPECIFIC; j++) printf " %s", part[j]; + if (uid[specific] > 1) printf ":%d", part[nf]; + while (j < nf) printf " %s", part[j++]; + printf "\n"; + + # Print the specific form. + printf "%s", specific; + for (j = 2; j <= nf-SPECIFIC; j++) printf " %s", part[j]; + if (uid[specific] > 1) printf ":%d", part[nf]; + while (j < nf) printf " %s", part[j++]; + printf "\n"; + } + }' | \ + +# Strip out load and store qualifiers. +sed -e 's/_load_op//g' -e 's/_store_op//g' | \ + +# Sort the processed file and eliminate duplicates. +sort -u | \ + +# Append the count of each generic function to each line. 
+awk 'function push() { + if (num) + for (i = 0; i < num; i++) + print line[i], num; + num = 0; + } + $1 == "@" { + print $0; + } + $1 != "@" { + if (last != $1) + push(); + last = $1; + line[num++] = $0; + } + END { + push(); + }' | \ + +# Now compute the gperf input file. +# Lines now have a fixed format +# generic op1 ... opn = result specific instruction when configured +# addressible const_ptr_ok volatile_ptr_ok transform predicate +# insn_code constraints optimize count +awk 'BEGIN { + MCC = '$MCC' + GCC = '$GCC' + i = 0; + COUNT = i++ + OPTIMIZE = i++ + CONSTRAINTS = i++ + INSN_CODE = i++ + PREDICATE = i++ + TRANSFORM = i++ + VOLATILE_PTR_OK = i++ + CONST_PTR_OK = i++ + INSTRUCTION = i++ + ADDRESSIBLE = i++ + CONFIGURED = i++ + WHEN = i++ + SPECIFIC = i++ + RESULT = i++ + EQ = i++ + OPN = i++ + NARGS = i++ + if (MCC) { + print "%{"; + print "/* Command-line: '"$gperf"' */"; + MAXARGS = 5 + } + if (GCC) + MAXARGS = 3 + } + function write_test(tree, type, num) { + if (type == "PTR") { + printf "\n && TY_kind(%s) == KIND_POINTER", tree; + } else if (type == "I5") { + printf "\n && is_integer_type(%s)", tree; + printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num; + printf "\n && ((UINT32)Targ_To_Host(tc) + 16) < 32"; + } else if (type == "U5") { + printf "\n && is_integer_type(%s)", tree; + printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num; + printf "\n && (UINT32)Targ_To_Host(tc) < 32"; + } else if (type == "U4") { + printf "\n && is_integer_type(%s)", tree; + printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num; + printf "\n && (UINT32)Targ_To_Host(tc) < 16"; + } else if (type == "U2") { + printf "\n && is_integer_type(%s)", tree; + printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num; + printf "\n && (UINT32)Targ_To_Host(tc) < 4"; + } else if (type == "BETYPE_U4" || type == "BETYPE_I4") { + printf "\n && is_integer_type(%s)", tree; + } else { + printf "\n && Similar_Types(%s,", tree; + printf "\n\t\t Be_Type_Tbl(%s), IGNORE_QUALIFIERS)", 
type; + } + } + $1 == "@" { + if (MCC) { + if ($2 == "@1") { + # Write the predicate function from the given parameters. + # The format is: + # @ @1 transform_ifii Make_3tree(OP_IASMII, ND_kid(t,1), ND_kid(t,2) + print ""; + print "/*ARGSUSED*/"; + print "static void"; + print $3 "(ND *func, ND *parent, ND *t, struct builtin *self)"; + print "{"; + printf " *t = *%s", $4; + for (i = 5; i <= NF; i++) printf " %s", $i; + print ","; + if (split($3,jmp,"_") == 5 && jmp[2] == "j") + printf "\t\t MK_I4CONST_ND((self->data << 5) + %d));\n", \ + jmp[3]; + else + print "\t\t MK_I4CONST_ND(self->data));"; + + print " Is_True(self->data > 0, (\"No implementation for %s\", self->name));"; + print "}"; + } else if ($2 == "@2") { + # Write the transform function from the given parameters. + # The format is: + # @ @2 2 is_int_func_int_int BETYPE_I4 BETYPE_I4 BETYPE_I4 + # (int (*)(int, int)) + print ""; + print "/*ARGSUSED*/"; + print "static BOOL"; + print $4 "(ND *func, ND *parent, ND *t, struct builtin *self)"; + print "{"; + print " TCON tc;"; + printf " if (ND_nkids(t) == %d", $3+1; + write_test("ST_type(ND_dec(func))", $5, ""); + for (i = 1; i <= $3; i++) { + printf "\n && ND_name(ND_kid(t,%d)) == TO_VAL", i; + write_test(sprintf("The_Tree_Type(ND_kid(t,%d))", i), $(i+5), i); + } + print ")"; + print " return TRUE;"; + print " Error_Prt_Line (ND_linenum(t), ec_builtin_function_type, self->name,"; + i = $3+6; + printf "\t\t \"%s", $i; + while (++i <= NF) printf " %s", $i; + print "\");"; + print " return FALSE;"; + print "}"; + } else if ($2 == "@3") { + if (once++ == 0) printf "\n#ifndef HAVE_ALTIVEC\n"; + printf "#define %s -1\n", $3; + } else { + if (once && twice++ == 0) printf "#endif /* HAVE_ALTIVEC */\n\n"; + printf "extern struct a_type *T_%s;\n", $2; + } + } + next; + } + $1 == "%" { + print $0; + status = 1; + exit; + } + { + # Compute the signature of the generic function. 
+ signature=$1; + for (i = 2; i <= NF-OPN; i++) { + if ($i != "...") + signature=(signature " " $i); + } + + # Ensure that the signature is unique. + if (signature_line[signature]) { + print "Ambiguous signatures:"; + print $0; + print line[signature_line[signature]]; + } + signature_line[signature] = n_lines; + + # Require that overloaded functions have the same attributes: + # number of arguments, when, configured, and addressible. + if (same_arg_count[$1] && same_arg_count[$1] != NF) + printf "%% number of arguments for %s varies: %d and %d\n", \ + $1, NF-NARGS, same_arg_count[$1]-NARGS; + same_arg_count[$1] = NF; + + if (same_when[$1] && same_when[$1] != $(NF-WHEN)) + printf "%% when for %s varies: %s and %s\n", \ + $1, $(NF-WHEN), same_when[$1]; + same_when[$1] = $(NF-WHEN); + + if (same_configured[$1] && same_configured[$1] != $(NF-CONFIGURED)) + printf "%% configured for %s varies: %s and %s\n", \ + $1, $(NF-CONFIGURED), same_configured[$1]; + same_configured[$1] = $(NF-CONFIGURED); + + if (same_addressible[$1] && same_addressible[$1] != $(NF-ADDRESSIBLE)) + printf "%% addressible for %s varies: %s and %s\n", \ + $1, $(NF-ADDRESSIBLE), same_addressible[$1]; + else if (same_addressible[$1] && same_addressible[$1] != "FALSE") + printf "%% Overloaded function %s is addressible\n", $1 + same_addressible[$1] = $(NF-ADDRESSIBLE); + + # Record the line. + line[n_lines++] = $0; + } + function push(fcn, n) { + if (last) printf "};\n"; + # Gcc3: declare as arrays of const pointers + if (fcn) printf "static const struct builtin *const O_%s[%d] = {\n", fcn, n; + last = fcn; + } + function mangle(name) { + if (split(name, names, ":") == 1) + return ("B_" names[1]); + return ("B" names[2] "_" names[1]); + } + END { + if (status) exit; + + # Gcc3: Mark file as Apple local + printf "/* APPLE LOCAL file AltiVec */\n"; + printf "/* This file is generated by ops-to-gp. Do not edit. 
*/\n\n"; + printf "/* To regenerate execute:\n"; + printf " ops-to-gp -gcc vec.ops builtin.ops\n"; + printf " with the current directory being gcc/config/rs6000. */\n\n"; + + # Output the description of each specific function. + uid = 0; + if (MCC) print ""; + for (i = 0; i < n_lines; i++) { + nf = split(line[i], part); + fcn = part[nf-SPECIFIC]; + if (!done[fcn]) { + printf "static const struct builtin %s = {", mangle(fcn); + if (GCC) printf " {"; + ellipsis = 1; + for (j = 2; j <= nf-OPN; j++) + if (part[j] != "...") { + printf " &T_%s,", part[j]; + } else { + ellipsis = -1; + printf " NULL,"; + } + while (j++ <= MAXARGS+1) + printf " NULL,"; + instruction = part[nf-INSTRUCTION]; + if (substr(instruction, 1, 4) == "MOP_") + instruction = substr(instruction, 5); + if (substr(instruction, length(instruction)) == "D") + instruction = (substr(instruction, 1, length(instruction) - 1) "."); + # Gcc3: Prefix each specific instruction with a "*" + if (match (instruction, "^[a-zA-Z]") > 0) + instruction = "*" instruction; + if (GCC) printf " },"; + if (GCC) printf " \"%s\",", substr(part[nf-CONSTRAINTS], 1, length(part[nf-CONSTRAINTS]) - 1); + printf " &T_%s,", part[nf-RESULT]; + if (MCC) printf " \"%s\",", part[nf-SPECIFIC]; + printf " %d,", ellipsis * (nf - NARGS); + if (MCC) { + printf " %s,", part[nf-WHEN]; + printf " %s,", part[nf-ADDRESSIBLE]; + printf " %s,", part[nf-CONST_PTR_OK]; + printf " %s,", part[nf-VOLATILE_PTR_OK]; + printf " %s,", part[nf-CONFIGURED]; + printf " %s,", part[nf-INSTRUCTION]; + printf " %s,", part[nf-TRANSFORM]; + printf " %s", part[nf-PREDICATE]; + } else if (GCC) { + printf " %s,", part[nf-CONST_PTR_OK]; + printf " %s,", part[nf-VOLATILE_PTR_OK]; + printf " %s,", part[nf-OPTIMIZE]; + printf " \"%s\",", part[nf-SPECIFIC]; + printf " \"%s\",", instruction; + printf " %s,", part[nf-INSN_CODE]; + printf " B_UID(%d)", uid++; + } + printf " };\n"; + } + done[fcn] = 1; + } + + if (GCC) printf "#define LAST_B_UID B_UID(%d)\n", uid; + + if (GCC) { 
+ # Output the description of each specific function. + print ""; + uid = 0; + for (i in done) + done[i] = ""; + print "const struct builtin * const Builtin[] = {" + for (i = 0; i < n_lines; i++) { + nf = split(line[i], part); + fcn = part[nf-SPECIFIC]; + if (!done[fcn]) { + printf " &%s,\n", mangle(fcn); + } + done[fcn] = 1; + } + print "};" + } + + # Output the overload tables for each generic function. + print ""; + for (i = 0; i < n_lines; i++) { + nf = split(line[i], part); + fcn = part[1]; + if (last != fcn) + push(fcn, part[nf]); + printf " &%s,\n", mangle(part[nf-SPECIFIC]); + } + push("", 0); + + # Output the builtin function structure. + print ""; + if (MCC) { + print "%}"; + print "struct overloadx {"; + print " char *name;"; + print " int fcns;"; + print " int args;"; + print " struct builtin **functions;"; + print "};"; + print "%%"; + } else if (GCC) { + print "const struct overloadx Overload[] = {"; + } + + # Output the builtin function list and data. + uid = 0; + for (i = 0; i < n_lines; i++) { + nf = split(line[i], part); + fcn = part[1]; + args = nf - NARGS; + if (part[nf-OPN] == "...") args = -args; + if (last != fcn) { + if (MCC) printf "%s, %d, %d, O_%s\n", fcn, part[nf], args, fcn; + if (GCC) printf " { \"%s\", %d, %d, O_%s, O_UID(%d) },\n", \ + fcn, part[nf], args, fcn, uid++; + } + last = fcn; + } + + if (GCC) { + print " { NULL, 0, 0, NULL, 0 }" + print "};"; + + printf "#define LAST_O_UID O_UID(%d)\n", uid; + } + + }' > $output.$suffix + +if [ "$MCC" = "1" ] ; then + $gperf > $output.h +fi diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c index 5d36d5d28b3..b48e4b019e7 100644 --- a/gcc/config/rs6000/rs6000-c.c +++ b/gcc/config/rs6000/rs6000-c.c @@ -30,6 +30,11 @@ #include "c-pragma.h" #include "errors.h" #include "tm_p.h" +/* APPLE LOCAL begin AltiVec */ +#include "c-common.h" +#include "cpplib.h" +#include "target.h" +/* APPLE LOCAL end AltiVec */ /* Handle the machine specific pragma longcall. 
Its syntax is @@ -78,6 +83,98 @@ rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED) #define builtin_define(TXT) cpp_define (pfile, TXT) #define builtin_assert(TXT) cpp_assert (pfile, TXT) +/* APPLE LOCAL begin AltiVec */ +/* Keep the AltiVec keywords handy for fast comparisons. */ +static GTY(()) cpp_hashnode *__vector_keyword; +static GTY(()) cpp_hashnode *vector_keyword; +static GTY(()) cpp_hashnode *__pixel_keyword; +static GTY(()) cpp_hashnode *pixel_keyword; +static GTY(()) cpp_hashnode *__bool_keyword; +static GTY(()) cpp_hashnode *bool_keyword; +static GTY(()) cpp_hashnode *_Bool_keyword; + +/* Called to decide whether a conditional macro should be expanded. */ + +bool +rs6000_expand_macro_p (const cpp_token *tok) +{ + static bool expand_bool_pixel = 0; + bool expand_this = 0; + const cpp_hashnode *ident = tok->val.node; + + if (ident == vector_keyword) + { + tok = c_lex_peek (0); + if (tok->type == CPP_NAME) + { + ident = tok->val.node; + if (ident == pixel_keyword || ident == __pixel_keyword + || ident == bool_keyword || ident == __bool_keyword + || ident == _Bool_keyword) + expand_this = expand_bool_pixel = 1; + else + { + enum rid rid_code = (enum rid)(ident->rid_code); + + if (rid_code == RID_UNSIGNED || rid_code == RID_LONG + || rid_code == RID_SHORT || rid_code == RID_SIGNED + || rid_code == RID_INT || rid_code == RID_CHAR + || rid_code == RID_FLOAT) + { + expand_this = 1; + /* If the next keyword is bool or pixel, it + will need to be expanded as well. 
*/ + tok = c_lex_peek (1); + if (tok->type == CPP_NAME) + { + ident = tok->val.node; + if (ident == pixel_keyword || ident == __pixel_keyword + || ident == bool_keyword || ident == __bool_keyword + || ident == _Bool_keyword) + expand_bool_pixel = 1; + } + } + } + } + } + else if (ident == pixel_keyword || ident == bool_keyword + || ident == _Bool_keyword) + { + if (expand_bool_pixel) + { + expand_this = 1; + expand_bool_pixel = 0; + } + } + + return expand_this; +} + +static void +cb_define_conditional_macro (cpp_reader *pfile ATTRIBUTE_UNUSED, + unsigned int n ATTRIBUTE_UNUSED, + cpp_hashnode *node) { + const unsigned char *name = node->ident.str; + bool underscore = (name[1] == '_'); + char kwd = (underscore ? name[2] : name[0]); + cpp_hashnode **kwd_node = 0; + + if (!underscore) /* macros without two leading underscores */ + node->flags |= NODE_DISABLED; /* shall be conditional */ + + switch (kwd) + { + case 'v': kwd_node = (underscore ? &__vector_keyword : &vector_keyword); break; + case 'p': kwd_node = (underscore ? &__pixel_keyword : &pixel_keyword); break; + case 'b': kwd_node = (underscore ? &__bool_keyword : &bool_keyword); break; + case '_': kwd_node = &_Bool_keyword; break; + default: abort (); + } + *kwd_node = node; +} + +/* APPLE LOCAL end AltiVec */ + void rs6000_cpu_cpp_builtins (cpp_reader *pfile) { @@ -93,13 +190,39 @@ rs6000_cpu_cpp_builtins (cpp_reader *pfile) builtin_define ("_ARCH_COM"); if (TARGET_ALTIVEC) { + /* APPLE LOCAL begin AltiVec */ + struct cpp_callbacks *cb = cpp_get_callbacks (pfile); + void (*old_cb_define) (cpp_reader *, unsigned int, cpp_hashnode *) + = cb->define; + /* APPLE LOCAL end AltiVec */ + builtin_define ("__ALTIVEC__"); builtin_define ("__VEC__=10206"); /* Define the AltiVec syntactic elements. 
*/ + + /* APPLE LOCAL AltiVec */ + cb->define = cb_define_conditional_macro; + builtin_define ("__vector=__attribute__((altivec(vector__)))"); builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short"); builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned"); + + /* APPLE LOCAL begin AltiVec */ + /* Keywords without two leading underscores are context-sensitive, and hence + implemented as conditional macros, controlled by the rs6000_expand_macro_p() + predicate above. */ + builtin_define ("vector=__attribute__((altivec(vector__)))"); + builtin_define ("pixel=__attribute__((altivec(pixel__))) unsigned short"); + builtin_define ("bool=__attribute__((altivec(bool__))) unsigned"); + builtin_define ("_Bool=__attribute__((altivec(bool__))) unsigned"); + cb->define = old_cb_define; + + /* Enable context-sensitive macros. */ + targetm.expand_macro_p = rs6000_expand_macro_p; + /* Enable '(vector signed int)(a, b, c, d)' vector literal notation. */ + targetm.cast_expr_as_vector_init = true; + /* APPLE LOCAL end AltiVec */ } if (TARGET_SPE) builtin_define ("__SPE__"); diff --git a/gcc/config/rs6000/rs6000-protos.h b/gcc/config/rs6000/rs6000-protos.h index 1121e309db9..b1675edc7b9 100644 --- a/gcc/config/rs6000/rs6000-protos.h +++ b/gcc/config/rs6000/rs6000-protos.h @@ -127,10 +127,13 @@ extern int mfcr_operation (rtx, enum machine_mode); extern int mtcrf_operation (rtx, enum machine_mode); extern int lmw_operation (rtx, enum machine_mode); extern struct rtx_def *create_TOC_reference (rtx); +/* APPLE LOCAL RTX_COST for multiply */ +extern int rs6000_rtx_mult_cost (rtx); extern void rs6000_split_multireg_move (rtx, rtx); extern void rs6000_emit_move (rtx, rtx, enum machine_mode); extern rtx rs6000_legitimize_address (rtx, rtx, enum machine_mode); -extern rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, +/* APPLE LOCAL pass reload addr by address */ +extern rtx rs6000_legitimize_reload_address (rtx *, enum machine_mode, int, int, int, 
int *); extern int rs6000_legitimate_address (enum machine_mode, rtx, int); extern bool rs6000_mode_dependent_address (rtx); @@ -204,8 +207,13 @@ extern int rs6000_tls_symbol_ref (rtx, enum machine_mode); extern void rs6000_pragma_longcall (struct cpp_reader *); extern void rs6000_cpu_cpp_builtins (struct cpp_reader *); +/* APPLE LOCAL AltiVec */ +extern bool rs6000_expand_macro_p (const struct cpp_token *); + #if TARGET_MACHO -char *output_call (rtx, rtx *, int, int); +void add_compiler_stub PARAMS ((tree, tree, int)); +void output_compiler_stub PARAMS ((void)); +extern char* output_call PARAMS ((rtx, rtx *, int, int)); #endif #endif /* rs6000-protos.h */ diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c index 73b912cf930..7a1a6c14b56 100644 --- a/gcc/config/rs6000/rs6000.c +++ b/gcc/config/rs6000/rs6000.c @@ -50,12 +50,20 @@ #include "target-def.h" #include "langhooks.h" #include "reload.h" +/* APPLE LOCAL why is this needed? */ +#include "insn-addr.h" #include "cfglayout.h" #include "sched-int.h" #if TARGET_XCOFF #include "xcoffout.h" /* get declarations of xcoff_*_section_name */ #endif +/* APPLE LOCAL begin Macintosh alignment */ +#ifndef TARGET_ALIGN_MAC68K +#define TARGET_ALIGN_MAC68K 0 +#endif +/* APPLE LOCAL end Macintosh alignment */ + #ifndef TARGET_NO_PROTOTYPE #define TARGET_NO_PROTOTYPE 0 #endif @@ -216,9 +224,16 @@ int rs6000_debug_arg; /* debug argument handling */ static GTY(()) tree opaque_V2SI_type_node; static GTY(()) tree opaque_V2SF_type_node; static GTY(()) tree opaque_p_V2SI_type_node; - -/* AltiVec requires a few more basic types in addition to the vector - types already defined in tree.c. 
*/ +static GTY(()) tree V16QI_type_node; +static GTY(()) tree V2SI_type_node; +static GTY(()) tree V2SF_type_node; +static GTY(()) tree V4HI_type_node; +static GTY(()) tree V4SI_type_node; +static GTY(()) tree V4SF_type_node; +static GTY(()) tree V8HI_type_node; +static GTY(()) tree unsigned_V16QI_type_node; +static GTY(()) tree unsigned_V8HI_type_node; +static GTY(()) tree unsigned_V4SI_type_node; static GTY(()) tree bool_char_type_node; /* __bool char */ static GTY(()) tree bool_short_type_node; /* __bool short */ static GTY(()) tree bool_int_type_node; /* __bool int */ @@ -302,6 +317,7 @@ static void rs6000_assemble_visibility (tree, int); static int rs6000_ra_ever_killed (void); static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *); static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *); +static const char *rs6000_mangle_fundamental_type (tree); extern const struct attribute_spec rs6000_attribute_table[]; static void rs6000_set_default_type_attributes (tree); static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT); @@ -410,15 +426,18 @@ static rtx rs6000_spe_function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree); static rtx rs6000_mixed_function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree, int); -static void rs6000_move_block_from_reg(int regno, rtx x, int nregs); +static void rs6000_move_block_from_reg (int regno, rtx x, int nregs); static void setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int); +/* APPLE LOCAL begin Altivec */ +static bool skip_vec_args (tree, int, int*); +/* APPLE LOCAL begin Altivec */ #if TARGET_MACHO static void macho_branch_islands (void); static void add_compiler_branch_island (tree, tree, int); -static int no_previous_def (tree function_name); -static tree get_prev_label (tree function_name); +static int no_previous_def (tree); +static tree get_prev_label (tree); #endif static tree rs6000_build_builtin_va_list (void); @@ -575,6 +594,9 @@ 
static const char alt_reg_names[][8] = #undef TARGET_EXPAND_BUILTIN #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin +#undef TARGET_MANGLE_FUNDAMENTAL_TYPE +#define TARGET_MANGLE_FUNDAMENTAL_TYPE rs6000_mangle_fundamental_type + #undef TARGET_INIT_LIBFUNCS #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs @@ -616,6 +638,11 @@ static const char alt_reg_names[][8] = #undef TARGET_SETUP_INCOMING_VARARGS #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs +/* APPLE LOCAL begin Altivec */ +#undef TARGET_SKIP_VEC_ARGS +#define TARGET_SKIP_VEC_ARGS skip_vec_args +/* APPLE LOCAL end Altivec */ + /* Always strict argument naming on rs6000. */ #undef TARGET_STRICT_ARGUMENT_NAMING #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true @@ -636,6 +663,9 @@ rs6000_override_options (const char *default_cpu) size_t i, j; struct rs6000_cpu_select *ptr; int set_masks; +/* APPLE LOCAL begin -fast */ + enum processor_type mcpu_cpu; +/* APPLE LOCAL end -fast */ /* Simplifications for entries below. */ @@ -746,6 +776,20 @@ rs6000_override_options (const char *default_cpu) rs6000_select[0].string = default_cpu; rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT; + /* APPLE LOCAL begin -fast */ + if (flag_fast || flag_fastf || flag_fastcp) + { + mcpu_cpu = PROCESSOR_POWER4; + if (rs6000_select[1].string == (char *)0 && rs6000_select[2].string == (char *)0) + { + /* -mcpu and -mtune unspecified. 
Assume both are G5 */ + set_target_switch ("tune=G5"); + set_target_switch ("cpu=G5"); + } + } + /* APPLE LOCAL end -fast */ + + for (i = 0; i < ARRAY_SIZE (rs6000_select); i++) { ptr = &rs6000_select[i]; @@ -762,6 +806,9 @@ rs6000_override_options (const char *default_cpu) target_flags &= ~set_masks; target_flags |= (processor_target_table[j].target_enable & set_masks); + /* APPLE LOCAL begin -fast */ + mcpu_cpu = processor_target_table[j].processor; + /* APPLE LOCAL end -fast */ } break; } @@ -771,6 +818,49 @@ rs6000_override_options (const char *default_cpu) } } + /* APPLE LOCAL begin -fast */ + if (flag_fast || flag_fastf || flag_fastcp) + { + flag_gcse_sm = 1; + flag_loop_transpose = 1; + rs6000_sched_insert_nops = sched_finish_regroup_exact; + flag_unroll_loops = 1; + flag_strict_aliasing = 1; + flag_schedule_interblock = 1; + align_jumps_max_skip = 15; + align_loops_max_skip = 15; + align_functions = 16; + align_loops = 16; + align_jumps = 16; + set_fast_math_flags (1); + flag_reorder_blocks = 1; +#if 0 + if (flag_branch_probabilities + && !flag_exceptions) + flag_reorder_blocks_and_partition = 1; +#endif + if (!flag_pic) + set_target_switch ("dynamic-no-pic"); + + if (mcpu_cpu == PROCESSOR_POWER4) + { + set_target_switch ("powerpc-gpopt"); + set_target_switch ("powerpc64"); + } + if (flag_fast || flag_fastcp) + /* This doesn't work with NAG Fortran output. The gcc 3.5 C++ libraries + have been adjusted so that it now works with them. */ + set_target_switch ("align-natural"); + if (flag_fastf) + /* This applies Fortran argument semantics; for NAG Fortran output only. */ + flag_argument_noalias = 2; + /* IMI flags */ + disable_typechecking_for_spec_flag = 1; + flag_unit_at_a_time = 1; + } + /* APPLE LOCAL end -fast */ + + if (TARGET_E500) rs6000_isel = 1; @@ -1070,6 +1160,10 @@ rs6000_parse_alignment_option (void) { if (rs6000_alignment_string == 0) return; +/* APPLE LOCAL begin Macintosh alignment 2002-2-26 ff */ + else if (! 
strcmp (rs6000_alignment_string, "mac68k")) + rs6000_alignment_flags = MASK_ALIGN_MAC68K; +/* APPLE LOCAL end Macintosh alignment 2002-2-26 ff */ else if (! strcmp (rs6000_alignment_string, "power")) rs6000_alignment_flags = MASK_ALIGN_POWER; else if (! strcmp (rs6000_alignment_string, "natural")) @@ -1099,6 +1193,22 @@ rs6000_parse_tls_size_option (void) void optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED) { + /* APPLE LOCAL begin tweak default optimizations */ + if (DEFAULT_ABI == ABI_DARWIN) + { + /* Turn these on only if specifically requested, not with -O* */ + /* Strict aliasing breaks too much existing code */ + flag_strict_aliasing = 0; + /* Block reordering causes code bloat, and very little speedup */ + flag_reorder_blocks = 0; + /* Multi-basic-block scheduling loses badly when the compiler + misguesses which blocks are going to be executed, more than + it gains when it guesses correctly. Its guesses for cases + where interblock scheduling occurs (if-then-else's) are + little better than random, so disable this unless requested. */ + flag_schedule_interblock = 0; + } + /* APPLE LOCAL end tweak default optimizations */ } /* Do anything needed at the start of the asm file. */ @@ -2271,6 +2381,11 @@ call_operand (rtx op, enum machine_mode mode) return 0; return (GET_CODE (op) == SYMBOL_REF + /* APPLE LOCAL begin accept hard R12 as target reg */ +#ifdef MAGIC_INDIRECT_CALL_REG + || (GET_CODE (op) == REG && REGNO (op) == MAGIC_INDIRECT_CALL_REG) +#endif + /* APPLE LOCAL end accept hard R12 as target reg */ || (GET_CODE (op) == REG && (REGNO (op) == LINK_REGISTER_REGNUM || REGNO (op) == COUNT_REGISTER_REGNUM @@ -2356,7 +2471,7 @@ rs6000_special_round_type_align (tree type, int computed, int specified) tree field = TYPE_FIELDS (type); /* Skip all the static variables only if ABI is greater than - 1 or equal to 0. */ + 1 or equal to 0. 
*/ while (field != NULL && TREE_CODE (field) == VAR_DECL) field = TREE_CHAIN (field); @@ -3050,9 +3165,12 @@ rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED) The Darwin code is inside #if TARGET_MACHO because only then is machopic_function_base_name() defined. */ rtx -rs6000_legitimize_reload_address (rtx x, enum machine_mode mode, +/* APPLE LOCAL pass reload addr by address */ +rs6000_legitimize_reload_address (rtx *addr_x, enum machine_mode mode, int opnum, int type, int ind_levels ATTRIBUTE_UNUSED, int *win) { + /* APPLE LOCAL pass reload addr by address */ + rtx x = *addr_x; /* We must recognize output that we have already generated ourselves. */ if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS @@ -3408,6 +3526,57 @@ rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2) return dest; } +/* APPLE LOCAL begin RTX_COST for multiply */ +int +rs6000_rtx_mult_cost (rtx x) +{ + switch (rs6000_cpu) + { + case PROCESSOR_RIOS1: + case PROCESSOR_PPC405: + return (GET_CODE (XEXP (x, 1)) != CONST_INT + ? COSTS_N_INSNS (5) + : INTVAL (XEXP (x, 1)) >= -256 && INTVAL (XEXP (x, 1)) <= 255 + ? COSTS_N_INSNS (3) : COSTS_N_INSNS (4)); + case PROCESSOR_RS64A: + return (GET_CODE (XEXP (x, 1)) != CONST_INT + ? GET_MODE (XEXP (x, 1)) != DImode + ? COSTS_N_INSNS (20) : COSTS_N_INSNS (34) + : INTVAL (XEXP (x, 1)) >= -256 && INTVAL (XEXP (x, 1)) <= 255 + ? COSTS_N_INSNS (8) : COSTS_N_INSNS (12)); + case PROCESSOR_RIOS2: + case PROCESSOR_MPCCORE: + case PROCESSOR_PPC604e: + return COSTS_N_INSNS (2); + case PROCESSOR_PPC601: + return COSTS_N_INSNS (5); + case PROCESSOR_PPC603: + case PROCESSOR_PPC7400: + case PROCESSOR_PPC750: + return (GET_CODE (XEXP (x, 1)) != CONST_INT + ? COSTS_N_INSNS (5) + : INTVAL (XEXP (x, 1)) >= -256 && INTVAL (XEXP (x, 1)) <= 255 + ? COSTS_N_INSNS (2) : COSTS_N_INSNS (3)); + case PROCESSOR_PPC7450: + return (GET_CODE (XEXP (x, 1)) != CONST_INT + ? 
COSTS_N_INSNS (4) + : COSTS_N_INSNS (3)); + case PROCESSOR_PPC403: + case PROCESSOR_PPC604: + return COSTS_N_INSNS (4); + case PROCESSOR_PPC620: + case PROCESSOR_PPC630: + return (GET_CODE (XEXP (x, 1)) != CONST_INT + ? GET_MODE (XEXP (x, 1)) != DImode + ? COSTS_N_INSNS (5) : COSTS_N_INSNS (7) + : INTVAL (XEXP (x, 1)) >= -256 && INTVAL (XEXP (x, 1)) <= 255 + ? COSTS_N_INSNS (3) : COSTS_N_INSNS (4)); + default: + abort (); + } +} +/* APPLE LOCAL end RTX_COST for multiply */ + /* Emit a move from SOURCE to DEST in mode MODE. */ void rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode) @@ -3845,6 +4014,8 @@ init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype, /* Check for a longcall attribute. */ if (fntype + /* APPLE LOCAL long-branch */ + && TARGET_LONG_BRANCH && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype)) && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))) cum->call_cookie = CALL_LONG; @@ -3953,6 +4124,7 @@ function_arg_boundary (enum machine_mode mode, tree type ATTRIBUTE_UNUSED) else return PARM_BOUNDARY; } + /* Update the data in CUM to advance over an argument of mode MODE and data type TYPE. @@ -4224,7 +4396,49 @@ rs6000_mixed_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, k == 0 ? const0_rtx : GEN_INT (k*4)); return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k, rtlvec)); - } + } + else if (ALTIVEC_VECTOR_MODE(mode) && align_words <= (GP_ARG_NUM_REG - 1)) + { + /* Varargs vector regs must be saved in R5-R8 or R9-R10. 
*/ + if (align_words == GP_ARG_NUM_REG - 2) + { + /* R9-R10 */ + return gen_rtx_PARALLEL (mode, + gen_rtvec (3, + gen_rtx_EXPR_LIST (VOIDmode, + NULL_RTX, const0_rtx), + gen_rtx_EXPR_LIST (VOIDmode, + gen_rtx_REG (SImode, + GP_ARG_MIN_REG + + align_words), + const0_rtx), + gen_rtx_EXPR_LIST (VOIDmode, + gen_rtx_REG (SImode, + GP_ARG_MIN_REG + + align_words+1), + GEN_INT(4)))); + } + else + { + /* R5-R8 */ + int k; + int size = int_size_in_bytes (type); + int no_units = ((size - 1) / 4) + 1; + int max_no_words = GP_ARG_NUM_REG - align_words; + int rtlvec_len = no_units < max_no_words ? no_units : max_no_words; + rtx *rtlvec = (rtx *) alloca (rtlvec_len * sizeof (rtx)); + memset ((char *) rtlvec, 0, rtlvec_len * sizeof (rtx)); + + for (k=0; k < rtlvec_len; k++) + rtlvec[k] = gen_rtx_EXPR_LIST (VOIDmode, + gen_rtx_REG (SImode, + GP_ARG_MIN_REG + + align_words + k), + k == 0 ? const0_rtx : GEN_INT (k*4)); + + return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rtlvec)); + } + } return NULL_RTX; } @@ -4343,7 +4557,11 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, is either wholly in GPRs or half in GPRs and half not. */ part_mode = DImode; - return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words); + if (TARGET_32BIT + && (TARGET_POWERPC64 || (align_words == GP_ARG_NUM_REG - 2))) + return rs6000_mixed_function_arg (cum, part_mode, type, align_words); + else + return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words); } } else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)) @@ -4619,6 +4837,33 @@ setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode, } } +/* APPLE LOCAL begin Altivec */ + +/* This routine determins if an extra pass over argument list is needed + for vector aruments. It returns true, if current argument need be + skipped. This depends on if we are in the first iteration (to skip + vectors), or 2nd iteration (to skip non-vectors). 
+*/ + +static +bool skip_vec_args(tree arg_type, int pass, int *last_pass) +{ + if (DEFAULT_ABI != ABI_DARWIN) + return false; + + if (TREE_CODE (arg_type) == VECTOR_TYPE) + { + *last_pass = 2; + if (pass == 1) + return true; + } + else if (pass == 2) + return true; + return false; +} +/* APPLE LOCAL end Altivec */ + + /* Create the va_list data type. */ static tree @@ -5885,6 +6130,7 @@ altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED, || arg2 == error_mark_node) return const0_rtx; + *expandedp = true; STRIP_NOPS (arg2); if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) & ~0x3) @@ -5902,7 +6148,6 @@ altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED, if (pat != 0) emit_insn (pat); - *expandedp = true; return NULL_RTX; } @@ -6456,6 +6701,18 @@ rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, static void rs6000_init_builtins (void) { + V2SI_type_node = build_vector_type (intSI_type_node, 2); + V2SF_type_node = build_vector_type (float_type_node, 2); + V4HI_type_node = build_vector_type (intHI_type_node, 4); + V4SI_type_node = build_vector_type (intSI_type_node, 4); + V4SF_type_node = build_vector_type (float_type_node, 4); + V8HI_type_node = build_vector_type (intHI_type_node, 8); + V16QI_type_node = build_vector_type (intQI_type_node, 16); + + unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16); + unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8); + unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4); + opaque_V2SI_type_node = copy_node (V2SI_type_node); opaque_V2SF_type_node = copy_node (V2SF_type_node); opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node); @@ -6486,10 +6743,10 @@ rs6000_init_builtins (void) get_identifier ("__pixel"), pixel_type_node)); - bool_V16QI_type_node = make_vector (V16QImode, bool_char_type_node, 1); - bool_V8HI_type_node = make_vector (V8HImode, bool_short_type_node, 1); - 
bool_V4SI_type_node = make_vector (V4SImode, bool_int_type_node, 1); - pixel_V8HI_type_node = make_vector (V8HImode, pixel_type_node, 1); + bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16); + bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8); + bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4); + pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8); (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL, get_identifier ("__vector unsigned char"), @@ -8648,7 +8905,11 @@ rs6000_got_register (rtx value ATTRIBUTE_UNUSED) static struct machine_function * rs6000_init_machine_status (void) { - return ggc_alloc_cleared (sizeof (machine_function)); + /* APPLE LOCAL begin volatile pic base reg in leaves */ + machine_function *mf = (machine_function *) ggc_alloc_cleared (sizeof (machine_function)); + mf->substitute_pic_base_reg = -1; + return mf; + /* APPLE LOCAL end volatile pic base reg in leaves */ } /* These macros test for integers and extract the low-order bits. */ @@ -9478,6 +9739,47 @@ print_operand_address (FILE *file, rtx x) abort (); } +/* APPLE LOCAL begin weak import */ +static void +find_weak_imports (rtx x) +{ + /* Patterns accepted here follow output_addr_const in final.c. */ + switch ( GET_CODE (x)) + { + case CONST: + case ZERO_EXTEND: + case SIGN_EXTEND: + case SUBREG: + find_weak_imports (XEXP (x, 0)); + break; + + case CONST_INT: + case CONST_DOUBLE: + case CODE_LABEL: + case LABEL_REF: + default: + break; + + case PLUS: + case MINUS: + find_weak_imports (XEXP (x, 0)); + find_weak_imports (XEXP (x, 1)); + break; + + case SYMBOL_REF: + if ( SYMBOL_REF_WEAK_IMPORT (x)) + { + fprintf (asm_out_file, "\t.weak_reference "); + assemble_name (asm_out_file, XSTR (x, 0)); + fprintf (asm_out_file, "\n"); + /* Attempt to prevent multiple weak_reference directives. */ + SYMBOL_REF_WEAK_IMPORT (x) = 0; + } + break; + } +} +/* APPLE LOCAL end weak import */ + /* Target hook for assembling integer objects. 
The PowerPC version has to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP is defined. It also needs to handle DI-mode objects on 64-bit @@ -9498,6 +9800,9 @@ rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p) if (TARGET_RELOCATABLE && !in_toc_section () && !in_text_section () + /* APPLE LOCAL begin hot/cold partitioning */ + && !in_text_unlikely_section () + /* APPLE LOCAL end hot/cold partitioning */ && !recurse && GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE @@ -9536,6 +9841,9 @@ rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p) } } #endif /* RELOCATABLE_NEEDS_FIXUP */ + /* APPLE LOCAL weak import */ + if (DEFAULT_ABI == ABI_DARWIN) + find_weak_imports (x); return default_assemble_integer (x, size, aligned_p); } @@ -10352,6 +10660,8 @@ first_reg_to_save (void) #if TARGET_MACHO if (flag_pic && current_function_uses_pic_offset_table + /* APPLE LOCAL volatile pic base reg in leaves */ + && cfun->machine->substitute_pic_base_reg == -1 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM) return RS6000_PIC_OFFSET_TABLE_REGNUM; #endif @@ -10540,7 +10850,7 @@ rs6000_stack_info (void) { static rs6000_stack_t info, zero_info; rs6000_stack_t *info_ptr = &info; - int reg_size = TARGET_POWERPC64 ? 8 : 4; + int reg_size = TARGET_32BIT ? 4 : 8; int ehrd_size; HOST_WIDE_INT total_raw_size; @@ -10652,7 +10962,7 @@ rs6000_stack_info (void) info_ptr->varargs_size = RS6000_VARARGS_AREA; info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8); info_ptr->parm_size = RS6000_ALIGN (current_function_outgoing_args_size, - 8); + TARGET_ALTIVEC ? 16 : 8); if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0) info_ptr->spe_gp_size = 8 * (32 - info_ptr->first_gp_reg_save); @@ -10689,7 +10999,7 @@ rs6000_stack_info (void) /* Align stack so vector save area is on a quadword boundary. 
*/ if (info_ptr->altivec_size != 0) info_ptr->altivec_padding_size - = 16 - (-info_ptr->vrsave_save_offset % 16); + = (16 - (-info_ptr->vrsave_save_offset % 16)) % 16; else info_ptr->altivec_padding_size = 0; @@ -10768,10 +11078,10 @@ rs6000_stack_info (void) + ehrd_size + info_ptr->cr_size + info_ptr->lr_size - + info_ptr->vrsave_size + /* APPLE LOCAL fix redundant add? */ + info_ptr->toc_size, - (TARGET_ALTIVEC_ABI || ABI_DARWIN) - ? 16 : 8); + /* APPLE LOCAL darwin native */ + (TARGET_ALTIVEC_ABI ? 16 : 8)); total_raw_size = (info_ptr->vars_size + info_ptr->parm_size @@ -11055,6 +11365,9 @@ static bool rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED) { tree type; + /* APPLE LOCAL -mlong-branch */ + if (TARGET_LONG_BRANCH) + return 0; if (decl) { if (TARGET_ALTIVEC_VRSAVE) @@ -11669,6 +11982,131 @@ generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep) return insn; } +/* APPLE LOCAL begin special ObjC method use of R12 */ +static int objc_method_using_pic = 0; + +/* Determine whether a name is an ObjC method. */ +static int name_encodes_objc_method_p (const char *piclabel_name) +{ + return (piclabel_name[0] == '*' && piclabel_name[1] == '"' + ? (piclabel_name[2] == 'L' + && (piclabel_name[3] == '+' || piclabel_name[3] == '-')) + : (piclabel_name[1] == 'L' + && (piclabel_name[2] == '+' || piclabel_name[2] == '-'))); +} +/* APPLE LOCAL end special ObjC method use of R12 */ + +/* APPLE LOCAL begin recompute PIC register use */ +/* Sometimes a function has references that require the PIC register, + but optimization removes them all. To catch this case + recompute current_function_uses_pic_offset_table here. + This may allow us to eliminate the prologue and epilogue. 
*/ + +static int +recompute_PIC_register_use (void) +{ + if (DEFAULT_ABI == ABI_DARWIN + && flag_pic && current_function_uses_pic_offset_table + && !cfun->machine->ra_needs_full_frame) + { + rtx insn; + current_function_uses_pic_offset_table = 0; + push_topmost_sequence (); + for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn)) + if ( reg_mentioned_p (pic_offset_table_rtx, insn)) + { + current_function_uses_pic_offset_table = 1; + break; + } + pop_topmost_sequence (); + } + return 0; +} +/* APPLE LOCAL end recompute PIC register use */ + +/* APPLE LOCAL begin volatile pic base reg in leaves */ +/* If this is a leaf function and we used any pic-based references, + see if there is an unused volatile reg we can use instead of R31. + If so set substitute_pic_base_reg to this reg, set its reg_ever_used + bit (to avoid confusing later calls to alloc_volatile_reg), and + make a pass through the existing RTL, substituting the new reg for + the old one wherever it appears. + Logically this is a void function; it is int so it can be used to + initialize a dummy variable, thus getting executed ahead of other + initializations. Technicolour yawn. */ + +/* ALLOC_VOLATILE_REG allocates a volatile register AFTER all gcc + register allocations have been done; we use it to reserve an + unused reg for holding VRsave. Returns -1 in case of failure (all + volatile regs are in use.) */ +/* Note, this is called from both the prologue and epilogue code, + with the assumption that it will return the same result both + times! Since the register arrays are not changed in between + this is valid, if a bit fragile. */ +/* In future we may also use this to grab an unused volatile reg to + hold the PIC base reg in the event that the current function makes + no procedure calls; this was done in 2.95. 
*/ +static int +alloc_volatile_reg (void) +{ + if (current_function_is_leaf + && reload_completed + && !cfun->machine->ra_needs_full_frame) + { + int r; + for (r = 10; r >= 2; --r) + if (! fixed_regs[r] && ! regs_ever_live[r]) + return r; + } + + return -1; /* fail */ +} + +static int +try_leaf_pic_optimization (void) +{ + if ( DEFAULT_ABI==ABI_DARWIN + && flag_pic && current_function_uses_pic_offset_table + && current_function_is_leaf + && !cfun->machine->ra_needs_full_frame ) + { + int reg = alloc_volatile_reg (); + if ( reg != -1 ) + { + /* Run through the insns, changing references to the original + PIC_OFFSET_TABLE_REGNUM to our new one. */ + rtx insn; + const int nregs = PIC_OFFSET_TABLE_REGNUM + 1; + rtx *reg_map = (rtx *) xmalloc (nregs * sizeof (rtx)); + memset (reg_map, 0, nregs * sizeof (rtx)); + reg_map[PIC_OFFSET_TABLE_REGNUM] = gen_rtx_REG (SImode, reg); + + push_topmost_sequence (); + for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn)) + { + if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN) + { + replace_regs (PATTERN (insn), reg_map, nregs, 1); + replace_regs (REG_NOTES (insn), reg_map, nregs, 1); + } + else if (GET_CODE (insn) == CALL_INSN) + { + if ( !SIBLING_CALL_P (insn)) + abort (); + } + } + pop_topmost_sequence (); + free (reg_map); + + regs_ever_live[reg] = 1; + regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 0; + cfun->machine->substitute_pic_base_reg = reg; + } + } + return 0; +} +/* APPLE LOCAL end volatile pic base reg in leaves */ + /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes. Save REGNO into [FRAME_REG + OFFSET] in mode MODE. 
*/ @@ -11738,9 +12176,13 @@ gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset) void rs6000_emit_prologue (void) { + /* APPLE LOCAL recompute PIC register use */ + int dummy ATTRIBUTE_UNUSED = recompute_PIC_register_use (); + /* APPLE LOCAL volatile pic base reg in leaves */ + int ignored ATTRIBUTE_UNUSED = try_leaf_pic_optimization (); rs6000_stack_t *info = rs6000_stack_info (); enum machine_mode reg_mode = Pmode; - int reg_size = UNITS_PER_WORD; + int reg_size = TARGET_32BIT ? 4 : 8; rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM); rtx frame_ptr_rtx = gen_rtx_REG (Pmode, 12); rtx frame_reg_rtx = sp_reg_rtx; @@ -11749,7 +12191,24 @@ rs6000_emit_prologue (void) int saving_FPRs_inline; int using_store_multiple; HOST_WIDE_INT sp_offset = 0; + /* APPLE LOCAL: callers_lr_already_saved */ + int callers_lr_already_saved = 0; +#if TARGET_MACHO + int lr_already_set_up_for_pic = 0; +#endif + /* APPLE LOCAL special ObjC method use of R12 */ + objc_method_using_pic = 0; + /* APPLE LOCAL BEGIN fix-and-continue mrs */ + if (TARGET_FIX_AND_CONTINUE) + { + emit_insn (gen_nop ()); + emit_insn (gen_nop ()); + emit_insn (gen_nop ()); + emit_insn (gen_nop ()); + } + /* APPLE LOCAL END fix-and-continue mrs */ + if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0) { reg_mode = V2SImode; @@ -11785,6 +12244,31 @@ rs6000_emit_prologue (void) rs6000_emit_stack_tie (); } + /* APPLE LOCAL begin special ObjC method use of R12 */ +#if TARGET_MACHO + if (DEFAULT_ABI == ABI_DARWIN + && current_function_uses_pic_offset_table && flag_pic) + { + const char *piclabel_name = machopic_function_base_name (); + + if (name_encodes_objc_method_p (piclabel_name) + /* If we're saving vector or FP regs via a function call, + then don't bother with this ObjC R12 optimization. + This test also eliminates world_save. 
*/ + && (info->first_altivec_reg_save > LAST_ALTIVEC_REGNO + || VECTOR_SAVE_INLINE (info->first_altivec_reg_save)) + && (info->first_fp_reg_save == 64 + || FP_SAVE_INLINE (info->first_fp_reg_save))) + { + /* We cannot output the label now; there seems to be no + way to prevent cfgcleanup from deleting it. It is done + in rs6000_output_function_prologue with fprintf! */ + objc_method_using_pic = 1; + } + } +#endif /* TARGET_MACHO */ + /* APPLE LOCAL end special ObjC method use of R12 */ + /* Save AltiVec registers if needed. */ if (TARGET_ALTIVEC_ABI && info->altivec_size != 0) { @@ -11862,7 +12346,12 @@ rs6000_emit_prologue (void) /* If we need to save CR, put it into r12. */ if (info->cr_save_p && frame_reg_rtx != frame_ptr_rtx) { - cr_save_rtx = gen_rtx_REG (SImode, 12); + /* APPLE LOCAL begin special ObjC method use of R12 */ + /* For Darwin, use R2, so we don't clobber the special ObjC + method use of R12. R11 has a special meaning for Ada, so we + can't use that. */ + cr_save_rtx = gen_rtx_REG (SImode, DEFAULT_ABI == ABI_DARWIN ? 2 : 12); + /* APPLE LOCAL end special ObjC method use of R12 */ emit_insn (gen_movesi_from_cr (cr_save_rtx)); } @@ -11885,17 +12374,57 @@ rs6000_emit_prologue (void) char rname[30]; const char *alloc_rname; rtvec p; - p = rtvec_alloc (2 + 64 - info->first_fp_reg_save); + + /* APPLE LOCAL begin Reduce code size / improve performance */ + int gen_following_label = 0; + int count = 0; + + if (current_function_uses_pic_offset_table && flag_pic +#ifdef INSN_SCHEDULING + /* Prevent the compiler from crashing + while scheduling insns after global_alloc! */ + && (optimize == 0 || !flag_schedule_insns_after_reload) +#endif + /* If this is the last CALL in the prolog, then we've got our PC. + If we're saving AltiVec regs via a function, we're not last. 
*/ + && (info->first_altivec_reg_save > LAST_ALTIVEC_REGNO + || VECTOR_SAVE_INLINE (info->first_altivec_reg_save))) + gen_following_label = lr_already_set_up_for_pic = 1; + + /* APPLE LOCAL: +2 (could be conditionalized) */ + p = rtvec_alloc (2 + 64 - info->first_fp_reg_save + 2 + + gen_following_label); - RTVEC_ELT (p, 0) = gen_rtx_CLOBBER (VOIDmode, + RTVEC_ELT (p, count++) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)); + /* APPLE LOCAL begin reduce code size */ +#if TARGET_MACHO + /* We have to calculate the offset into saveFP to where we must + call (!!) SAVEFP also saves the caller's LR -- placed into + R0 above -- into 8(R1). SAVEFP/RESTOREFP should never be + called to save or restore only F31. */ + + if (info->lr_save_offset != 8 || info->first_fp_reg_save == 63) + abort (); + + sprintf (rname, "*saveFP%s%.0d ; save f%d-f31", + (info->first_fp_reg_save - 32 == 14 ? "" : "+"), + (info->first_fp_reg_save - 46) * 4, + info->first_fp_reg_save - 32); +#else + /* APPLE LOCAL end reduce code size */ sprintf (rname, "%s%d%s", SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX); + /* APPLE LOCAL reduce code size */ +#endif /* TARGET_MACHO */ alloc_rname = ggc_strdup (rname); - RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode, + RTVEC_ELT (p, count++) = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname)); + /* APPLE LOCAL reduce code size */ + if ( gen_following_label ) + RTVEC_ELT (p, count++) = gen_rtx_USE (VOIDmode, const0_rtx); for (i = 0; i < 64 - info->first_fp_reg_save; i++) { rtx addr, reg, mem; @@ -11906,11 +12435,31 @@ rs6000_emit_prologue (void) mem = gen_rtx_MEM (DFmode, addr); set_mem_alias_set (mem, rs6000_sr_alias_set); - RTVEC_ELT (p, i + 2) = gen_rtx_SET (VOIDmode, mem, reg); + RTVEC_ELT (p, count++) = gen_rtx_SET (VOIDmode, mem, reg); } + /* APPLE LOCAL begin fix 2866661 */ +#if TARGET_MACHO + /* Darwin version of these functions stores R0. 
*/ + RTVEC_ELT (p, count++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0)); + + /* If we saved LR, *tell* people about it! */ + if (info->lr_save_p) + { + rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, + GEN_INT (info->lr_save_offset + sp_offset)); + rtx mem = gen_rtx_MEM (Pmode, addr); + /* This should not be of rs6000_sr_alias_set, because of + __builtin_return_address. */ + RTVEC_ELT (p, count++) = gen_rtx_SET (Pmode, mem, + gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)); + } +#endif + /* APPLE LOCAL end fix 2866661 */ insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p)); rs6000_frame_related (insn, frame_ptr_rtx, info->total_size, NULL_RTX, NULL_RTX); + /* APPLE LOCAL: callers_lr_already_saved */ + callers_lr_already_saved = 1; } /* Save GPRs. This is done as a PARALLEL if we are using @@ -11945,7 +12494,11 @@ rs6000_emit_prologue (void) && ! call_used_regs[info->first_gp_reg_save+i]) || (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0) - || (DEFAULT_ABI == ABI_DARWIN && flag_pic)))) + /* APPLE LOCAL begin volatile pic base reg in leaves */ + || (DEFAULT_ABI == ABI_DARWIN && flag_pic + && current_function_uses_pic_offset_table + && cfun->machine->substitute_pic_base_reg == -1)))) + /* APPLE LOCAL end volatile pic base reg in leaves */ { rtx addr, reg, mem; reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i); @@ -12027,8 +12580,18 @@ rs6000_emit_prologue (void) } } + /* APPLE LOCAL special ObjC method use of R12 */ + if (objc_method_using_pic) + rs6000_maybe_dead ( + emit_move_insn (gen_rtx_REG (Pmode, + cfun->machine->substitute_pic_base_reg == -1 + ? PIC_OFFSET_TABLE_REGNUM + : cfun->machine->substitute_pic_base_reg), + gen_rtx_REG (Pmode, 12))); + /* Save lr if we used it. 
*/ - if (info->lr_save_p) + /* APPLE LOCAL: callers_lr_already_saved */ + if (info->lr_save_p && !callers_lr_already_saved) { rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, GEN_INT (info->lr_save_offset + sp_offset)); @@ -12111,17 +12674,38 @@ rs6000_emit_prologue (void) #if TARGET_MACHO if (DEFAULT_ABI == ABI_DARWIN + /* APPLE LOCAL special ObjC method use of R12 */ + && !objc_method_using_pic && flag_pic && current_function_uses_pic_offset_table) { rtx dest = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM); const char *picbase = machopic_function_base_name (); rtx src = gen_rtx_SYMBOL_REF (Pmode, picbase); - rs6000_maybe_dead (emit_insn (gen_load_macho_picbase (dest, src))); + /* APPLE LOCAL begin save and restore LR */ + /* Save and restore LR locally around this call (in R0). */ + if (!info->lr_save_p) + rs6000_maybe_dead (emit_move_insn (gen_rtx_REG (Pmode, 0), dest)); + /* APPLE LOCAL end save and restore LR */ + /* APPLE LOCAL begin performance enhancement */ +#if TARGET_MACHO + if (!lr_already_set_up_for_pic) + rs6000_maybe_dead (emit_insn (gen_load_macho_picbase (dest, src))); +#endif + /* APPLE LOCAL end performance enhancement */ + + /* APPLE LOCAL begin volatile pic base reg in leaves */ rs6000_maybe_dead ( - emit_move_insn (gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM), - gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM))); + emit_move_insn ( + gen_rtx_REG (Pmode, + cfun->machine->substitute_pic_base_reg == -1 + ? RS6000_PIC_OFFSET_TABLE_REGNUM + : cfun->machine->substitute_pic_base_reg), + dest)); + if (!info->lr_save_p) + rs6000_maybe_dead (emit_move_insn (dest, gen_rtx_REG (Pmode, 0))); + /* APPLE LOCAL end */ } #endif } @@ -12137,6 +12721,8 @@ rs6000_output_function_prologue (FILE *file, if (TARGET_DEBUG_STACK) debug_stack_info (info); + /* APPLE LOCAL do not extern fp save/restore */ +#if !TARGET_MACHO /* Write .extern for any function we will call to save and restore fp values. 
*/ if (info->first_fp_reg_save < 64 @@ -12145,6 +12731,8 @@ rs6000_output_function_prologue (FILE *file, SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX, RESTORE_FP_PREFIX, info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX); + /* APPLE LOCAL do not extern fp save/restore */ +#endif /* !TARGET_MACHO */ /* Write .extern for AIX common mode routines, if needed. */ if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined) @@ -12158,6 +12746,16 @@ rs6000_output_function_prologue (FILE *file, common_mode_defined = 1; } + /* APPLE LOCAL special ObjC method use of R12 */ +#if TARGET_MACHO + if ( HAVE_prologue && DEFAULT_ABI == ABI_DARWIN && objc_method_using_pic ) + { + /* APPLE FIXME isn't there an asm macro to do all this? */ + const char* piclabel = machopic_function_base_name (); + fprintf(file, "%s:\n", (*piclabel == '*') ? piclabel + 1 : piclabel); + } +#endif + if (! HAVE_prologue) { start_sequence (); @@ -12207,7 +12805,7 @@ rs6000_emit_epilogue (int sibcall) rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1); rtx frame_reg_rtx = sp_reg_rtx; enum machine_mode reg_mode = Pmode; - int reg_size = UNITS_PER_WORD; + int reg_size = TARGET_32BIT ? 4 : 8; int i; info = rs6000_stack_info (); @@ -12232,6 +12830,8 @@ rs6000_emit_epilogue (int sibcall) using_mfcr_multiple = (rs6000_cpu == PROCESSOR_PPC601 || rs6000_cpu == PROCESSOR_PPC603 || rs6000_cpu == PROCESSOR_PPC750 + /* APPLE LOCAL ? */ + || rs6000_cpu == PROCESSOR_PPC7400 || optimize_size); /* If we have a frame pointer, a call to alloca, or a large stack @@ -12324,7 +12924,9 @@ rs6000_emit_epilogue (int sibcall) set_mem_alias_set (mem, rs6000_sr_alias_set); - emit_move_insn (gen_rtx_REG (SImode, 12), mem); + /* APPLE LOCAL use R11 because of ObjC use of R12 in sibcall to CTR */ + emit_move_insn (gen_rtx_REG (SImode, + DEFAULT_ABI == ABI_DARWIN ? 11 : 12), mem); } /* Set LR here to try to overlap restores below. */ @@ -12394,7 +12996,11 @@ rs6000_emit_epilogue (int sibcall) && ! 
call_used_regs[info->first_gp_reg_save+i]) || (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0) - || (DEFAULT_ABI == ABI_DARWIN && flag_pic)))) + /* APPLE LOCAL begin darwin native */ + || (DEFAULT_ABI == ABI_DARWIN && flag_pic + && current_function_uses_pic_offset_table + && cfun->machine->substitute_pic_base_reg == -1)))) + /* APPLE LOCAL end darwin native */ { rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, GEN_INT (info->gp_save_offset @@ -12448,7 +13054,9 @@ rs6000_emit_epilogue (int sibcall) /* If we saved cr, restore it here. Just those that were used. */ if (info->cr_save_p) { - rtx r12_rtx = gen_rtx_REG (SImode, 12); + /* APPLE LOCAL use R11 because of ObjC use of R12 in sibcall to CTR */ + /* APPLE LOCAL silly name retained to minimize deviation from FSF */ + rtx r12_rtx = gen_rtx_REG (SImode, DEFAULT_ABI == ABI_DARWIN ? 11 : 12); int count = 0; if (using_mfcr_multiple) @@ -12548,8 +13156,25 @@ rs6000_emit_epilogue (int sibcall) char rname[30]; const char *alloc_rname; + /* APPLE LOCAL begin code size reduction / performance enhancement */ +#if TARGET_MACHO + /* We have to calculate the offset into RESTFP to where we must + call (!!) RESTFP also restores the caller's LR from 8(R1). + RESTFP should *never* be called to restore only F31. */ + + if (info->lr_save_offset != 8 || info->first_fp_reg_save == 63) + abort (); + + sprintf (rname, "*restFP%s%.0d ; restore f%d-f31", + (info->first_fp_reg_save - 32 == 14 ? 
"" : "+"), + (info->first_fp_reg_save - 46) * 4, + info->first_fp_reg_save - 32); +#else + /* APPLE LOCAL end code size reduction / performance enhancement */ sprintf (rname, "%s%d%s", RESTORE_FP_PREFIX, info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX); + /* APPLE LOCAL code size reduction / performance enhancement */ +#endif /* TARGET_MACHO */ alloc_rname = ggc_strdup (rname); RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, @@ -14663,6 +15288,11 @@ rs6000_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt) const struct attribute_spec rs6000_attribute_table[] = { /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ + /* APPLE LOCAL begin double destructor */ +#ifdef SUBTARGET_ATTRIBUTE_TABLE + SUBTARGET_ATTRIBUTE_TABLE +#endif + /* APPLE LOCAL end double destructor */ { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute }, { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute }, { "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute }, @@ -14756,6 +15386,21 @@ rs6000_handle_altivec_attribute (tree *node, tree name, tree args, return NULL_TREE; } +/* AltiVec defines four built-in scalar types that serve as vector + elements; we must teach the compiler how to mangle them. */ + +static const char * +rs6000_mangle_fundamental_type (tree type) +{ + if (type == bool_char_type_node) return "U6__boolc"; + if (type == bool_short_type_node) return "U6__bools"; + if (type == pixel_type_node) return "u7__pixel"; + if (type == bool_int_type_node) return "U6__booli"; + + /* For all other types, use normal C++ mangling. */ + return NULL; +} + /* Handle a "longcall" or "shortcall" attribute; arguments as in struct attribute_spec.handler. */ @@ -14998,6 +15643,7 @@ symbolic_operand (rtx op) #if TARGET_MACHO static tree branch_island_list = 0; +static int local_label_unique_number = 0; /* Remember to generate a branch island for far calls to the given function. 
*/ @@ -15027,17 +15673,20 @@ macho_branch_islands (void) { char tmp_buf[512]; tree branch_island; + const char *name; + const char *label; + char name_buf[512]; + char *local_label_0; + const char *non_lazy_pointer_name, *unencoded_non_lazy_pointer_name; + int length; for (branch_island = branch_island_list; branch_island; branch_island = TREE_CHAIN (branch_island)) { - const char *label = - IDENTIFIER_POINTER (BRANCH_ISLAND_LABEL_NAME (branch_island)); - const char *name = - darwin_strip_name_encoding ( - IDENTIFIER_POINTER (BRANCH_ISLAND_FUNCTION_NAME (branch_island))); - char name_buf[512]; + label = IDENTIFIER_POINTER (BRANCH_ISLAND_LABEL_NAME (branch_island)); + name = darwin_strip_name_encoding ( + IDENTIFIER_POINTER (BRANCH_ISLAND_FUNCTION_NAME (branch_island))); /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */ if (name[0] == '*' || name[0] == '&') strcpy (name_buf, name+1); @@ -15053,15 +15702,66 @@ macho_branch_islands (void) fprintf (asm_out_file, "\t.stabd 68,0," HOST_WIDE_INT_PRINT_UNSIGNED "\n", BRANCH_ISLAND_LINE_NUMBER(branch_island)); #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */ - if (flag_pic) + /* If PIC and the callee has no stub, do an indirect call through a + non-lazy-pointer. 'save_world' expects a parameter in R11; + theh dyld_stub_binding_helper (part of the Mach-O stub + interface) expects a different parameter in R11. This is + effectively a "non-lazy stub." By-the-way, a + "non-lazy-pointer" is a .long that gets coalesced with others + of the same value, so one NLP suffices for an entire + application. */ + if (flag_pic && (machopic_classify_ident (get_identifier (name)) == MACHOPIC_UNDEFINED)) + { + /* This is the address of the non-lazy pointer; load from it + to get the address we want. 
*/ + non_lazy_pointer_name = machopic_non_lazy_ptr_name (name); + machopic_validate_stub_or_non_lazy_ptr (non_lazy_pointer_name, + /* non-lazy-pointer */0); + unencoded_non_lazy_pointer_name = + (*targetm.strip_name_encoding) (non_lazy_pointer_name); + length = strlen (name); + local_label_0 = alloca (length + 32); + /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */ + if (name[0] == '*' || name[0] == '&') + strcpy (name_buf, name+1); + else + { + name_buf[0] = '_'; + strcpy (name_buf+1, name); + } + + sprintf (local_label_0, "%s_%d_pic", name_buf, local_label_unique_number); + local_label_unique_number++; + strcpy (tmp_buf, "\n"); + strcat (tmp_buf, label); + strcat (tmp_buf, "\tmflr r0\n"); + strcat (tmp_buf, "\tbcl 20,31,"); + strcat (tmp_buf, "\tbcl 20,31,%s\n"); + strcat (tmp_buf, local_label_0); + strcat (tmp_buf, ":\n"); + strcat (tmp_buf, "\tmflr r12\n"); + strcat (tmp_buf, "\taddis r12,r12,ha16("); + strcat (tmp_buf, non_lazy_pointer_name); + strcat (tmp_buf, "-"); + strcat (tmp_buf, local_label_0); + strcat (tmp_buf, ")\n\tlwz r12,lo16("); + strcat (tmp_buf, non_lazy_pointer_name); + strcat (tmp_buf, "-"); + strcat (tmp_buf, local_label_0); + strcat (tmp_buf, ")(r12)\n"); + strcat (tmp_buf, "\tmtlr r0\n"); + strcat (tmp_buf, "\tmtctr r12\n"); + strcat (tmp_buf, "\tbctr\n"); + } + else if (flag_pic) { strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,"); strcat (tmp_buf, label); strcat (tmp_buf, "_pic\n"); strcat (tmp_buf, label); - strcat (tmp_buf, "_pic:\n\tmflr r11\n"); + strcat (tmp_buf, "_pic:\n\tmflr r12\n"); - strcat (tmp_buf, "\taddis r11,r11,ha16("); + strcat (tmp_buf, "\taddis r12,r12,ha16("); strcat (tmp_buf, name_buf); strcat (tmp_buf, " - "); strcat (tmp_buf, label); @@ -15069,7 +15769,7 @@ macho_branch_islands (void) strcat (tmp_buf, "\tmtlr r0\n"); - strcat (tmp_buf, "\taddi r12,r11,lo16("); + strcat (tmp_buf, "\taddi r12,r12,lo16("); strcat (tmp_buf, name_buf); strcat (tmp_buf, " - "); strcat (tmp_buf, label); @@ -15135,12 
+15835,55 @@ char * output_call (rtx insn, rtx *operands, int dest_operand_number, int cookie_operand_number) { static char buf[256]; + const char *far_call_instr_str=NULL, *near_call_instr_str=NULL; + rtx pattern; + + switch (GET_CODE (insn)) + { + case CALL_INSN: + far_call_instr_str = "jbsr"; + near_call_instr_str = "bl"; + pattern = NULL_RTX; + break; + case JUMP_INSN: + far_call_instr_str = "jmp"; + near_call_instr_str = "b"; + pattern = NULL_RTX; + break; + case INSN: + pattern = PATTERN (insn); + break; + default: + abort(); + break; + } + if (GET_CODE (operands[dest_operand_number]) == SYMBOL_REF && (INTVAL (operands[cookie_operand_number]) & CALL_LONG)) { tree labelname; tree funname = get_identifier (XSTR (operands[dest_operand_number], 0)); + /* This insn represents a prologue or epilogue. */ + if ((pattern != NULL_RTX) && GET_CODE (pattern) == PARALLEL) + { + rtx parallel_first_op = XVECEXP (pattern, 0, 0); + switch (GET_CODE (parallel_first_op)) + { + case CLOBBER: /* Prologue: a call to save_world. */ + far_call_instr_str = "jbsr"; + near_call_instr_str = "bl"; + break; + case RETURN: /* Epilogue: a call to rest_world. */ + far_call_instr_str = "jmp"; + near_call_instr_str = "b"; + break; + default: + abort(); + break; + } + } + if (no_previous_def (funname)) { int line_number = 0; @@ -15303,6 +16046,129 @@ toc_section (void) #endif /* TARGET_MACHO */ +/* APPLE LOCAL begin Macintosh alignment 2002-1-22 ff */ +/* Return the alignment of a struct based on the Macintosh PowerPC + alignment rules. In general the alignment of a struct is + determined by the greatest alignment of its elements. However, the + PowerPC rules cause the alignment of a struct to peg at word + alignment except when the first field has greater than word + (32-bit) alignment, in which case the alignment is determined by + the alignment of the first field. 
*/ + +unsigned +round_type_align (tree the_struct, unsigned computed, unsigned specified) +{ + if (TARGET_ALTIVEC && TREE_CODE (the_struct) == VECTOR_TYPE) + { + /* All vectors are (at least) 16-byte aligned. A struct or + union with a vector element is also 16-byte aligned. */ + return MAX (RS6000_VECTOR_ALIGNMENT, MAX (computed, specified)); + } + + if (TREE_CODE (the_struct) == RECORD_TYPE + || TREE_CODE (the_struct) == UNION_TYPE + || TREE_CODE (the_struct) == QUAL_UNION_TYPE) + { + tree first_field = TYPE_FIELDS (the_struct); + + /* Skip past static fields, enums, and constant fields that are + not really a part of the record layout. */ + while ((first_field != 0) + && (TREE_CODE (first_field) != FIELD_DECL)) + first_field = TREE_CHAIN (first_field); + + if (first_field != 0) + { + /* If other-than-default alignment (which includes mac68k + mode) is in effect, then no adjustments to the alignment + should be necessary. Ditto if the struct has the + __packed__ attribute. */ + if (TYPE_PACKED (the_struct) || TARGET_ALIGN_MAC68K + || TARGET_ALIGN_NATURAL || maximum_field_alignment != 0) + /* Do nothing */ ; + else + { + /* The following code handles Macintosh PowerPC + alignment. The implementation is complicated by the + fact that BIGGEST_ALIGNMENT is 128 when AltiVec is + enabled and 32 when it is not. So when AltiVec is + not enabled, alignment is generally limited to word + alignment. Consequently, the alignment of unions has + to be recalculated if AltiVec is not enabled. + + Below we explicitly test for fields with greater than + word alignment: doubles, long longs, and structs and + arrays with greater than word alignment. */ + unsigned val; + tree field_type; + + val = MAX (computed, specified); + + if (TREE_CODE (the_struct) == UNION_TYPE && !TARGET_ALTIVEC) + { + tree field = first_field; + + while (field != 0) + { + /* Don't consider statics, enums and constant fields + which are not really a part of the record. 
*/ + if (TREE_CODE (field) != FIELD_DECL) + { + field = TREE_CHAIN (field); + continue; + } + field_type = TREE_TYPE(field); + if (TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE) + field_type = get_inner_array_type (field); + else + field_type = TREE_TYPE (field); + val = MAX (TYPE_ALIGN (field_type), val); + if (FLOAT_TYPE_P (field_type) + && TYPE_MODE (field_type) == DFmode) + val = MAX (RS6000_DOUBLE_ALIGNMENT, val); + else if (INTEGRAL_TYPE_P (field_type) + && TYPE_MODE (field_type) == DImode) + val = MAX (RS6000_LONGLONG_ALIGNMENT, val); + field = TREE_CHAIN (field); + } + } + else + { + if (TREE_CODE (TREE_TYPE (first_field)) == ARRAY_TYPE) + field_type = get_inner_array_type (first_field); + else + field_type = TREE_TYPE (first_field); + + if (field_type == error_mark_node) + return val; + val = MAX (TYPE_ALIGN (field_type), val); + + if (FLOAT_TYPE_P (field_type) + && TYPE_MODE (field_type) == DFmode) + val = MAX (RS6000_DOUBLE_ALIGNMENT, val); + else if (INTEGRAL_TYPE_P (field_type) + && TYPE_MODE (field_type) == DImode) + val = MAX (RS6000_LONGLONG_ALIGNMENT, val); + } + + return val; + } + } /* first_field != 0 */ + + /* Ensure all MAC68K structs are at least 16-bit aligned. + Unless the struct has __attribute__ ((packed)). */ + + if (TARGET_ALIGN_MAC68K && ! TYPE_PACKED (the_struct)) + { + if (computed < 16) + computed = 16; + } + } /* RECORD_TYPE, etc */ + + return (MAX (computed, specified)); +} +/* APPLE LOCAL end Macintosh alignment 2002-1-22 ff */ + #if TARGET_ELF static unsigned int rs6000_elf_section_type_flags (tree decl, const char *name, int reloc) diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h index 641e4bbb8af..cbdb74a6b7f 100644 --- a/gcc/config/rs6000/rs6000.h +++ b/gcc/config/rs6000/rs6000.h @@ -23,6 +23,9 @@ /* Note that some other tm.h files include this one and then override many of the definitions. */ +/* APPLE LOCAL fat builds */ +#define DEFAULT_TARGET_ARCH "ppc" + /* Definitions for the object file format. 
These are set at compile-time. */ @@ -109,8 +112,10 @@ program. Do not define this macro if it does not need to do anything. */ - + +#ifndef SUBTARGET_EXTRA_SPECS #define SUBTARGET_EXTRA_SPECS +#endif #define EXTRA_SPECS \ { "cpp_default", CPP_DEFAULT_SPEC }, \ @@ -197,6 +202,15 @@ extern int target_flags; 0x00100000, and sysv4.h uses 0x00800000 -> 0x40000000. 0x80000000 is not available because target_flags is signed. */ +/* APPLE LOCAL long-branch */ +/* gen call addr in register for >64M range */ +#define MASK_LONG_BRANCH 0x02000000 + +/* APPLE LOCAL BEGIN fix-and-continue mrs */ +#define MASK_FIX_AND_CONTINUE 0x04000000 +#define MASK_INDIRECT_ALL_DATA 0x08000000 +/* APPLE LOCAL END fix-and-continue mrs */ + #define TARGET_POWER (target_flags & MASK_POWER) #define TARGET_POWER2 (target_flags & MASK_POWER2) #define TARGET_POWERPC (target_flags & MASK_POWERPC) @@ -215,6 +229,8 @@ extern int target_flags; #define TARGET_SCHED_PROLOG (target_flags & MASK_SCHED_PROLOG) #define TARGET_ALTIVEC (target_flags & MASK_ALTIVEC) #define TARGET_AIX_STRUCT_RET (target_flags & MASK_AIX_STRUCT_RET) +/* APPLE LOCAL long-branch */ +#define TARGET_LONG_BRANCH (target_flags & MASK_LONG_BRANCH) /* Define TARGET_MFCRF if the target assembler supports the optional field operand for mfcr and the target processor supports the @@ -226,7 +242,6 @@ extern int target_flags; #define TARGET_MFCRF 0 #endif - #define TARGET_32BIT (! TARGET_64BIT) #define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT) #define TARGET_UPDATE (! TARGET_NO_UPDATE) @@ -248,6 +263,10 @@ extern int target_flags; #endif #define TARGET_XL_CALL 0 +/* APPLE LOCAL BEGIN fix-and-continue mrs */ +#define TARGET_FIX_AND_CONTINUE (target_flags & MASK_FIX_AND_CONTINUE) +#define TARGET_INDIRECT_ALL_DATA (target_flags & MASK_INDIRECT_ALL_DATA) +/* APPLE LOCAL END fix-and-continue mrs */ /* Run-time compilation parameters selecting different hardware subsets. 
@@ -346,6 +365,23 @@ extern int target_flags; ""}, \ {"no-svr4-struct-return", MASK_AIX_STRUCT_RET, \ ""}, \ + /* APPLE LOCAL long-branch */ \ + {"long-branch", MASK_LONG_BRANCH, \ + N_("Generate 32-bit call addresses (range > 64M)")}, \ + {"no-long-branch", -MASK_LONG_BRANCH, ""}, \ + {"longcall", MASK_LONG_BRANCH, \ + N_("Generate 32-bit call addresses (range > 64M)")}, \ + {"no-longcall", -MASK_LONG_BRANCH, ""}, \ + /* APPLE LOCAL BEGIN fix-and-continue mrs */ \ + {"fix-and-continue", MASK_FIX_AND_CONTINUE, \ + N_("Generate code suitable for fast turn around debugging")}, \ + {"no-fix-and-continue", -MASK_FIX_AND_CONTINUE, \ + N_("Don't generate code suitable for fast turn around debugging")},\ + {"indirect-data", MASK_INDIRECT_ALL_DATA, \ + N_("Generate code suitable for fast turn around debugging")}, \ + {"no-indirect-data", -MASK_INDIRECT_ALL_DATA, \ + N_("Don't generate code suitable for fast turn around debugging")},\ + /* APPLE LOCAL END fix-and-continue mrs */ \ {"mfcrf", MASK_MFCRF, \ N_("Generate single field mfcr instruction")}, \ {"no-mfcrf", - MASK_MFCRF, \ @@ -539,6 +575,10 @@ extern const char *rs6000_warn_altivec_long_switch; #define MASK_ALIGN_POWER 0x00000000 #define MASK_ALIGN_NATURAL 0x00000001 #define TARGET_ALIGN_NATURAL (rs6000_alignment_flags & MASK_ALIGN_NATURAL) +/* APPLE LOCAL begin Macintosh alignment 2002-2-26 ff */ +#define MASK_ALIGN_MAC68K 0x00000002 +#define TARGET_ALIGN_MAC68K (rs6000_alignment_flags & MASK_ALIGN_MAC68K) +/* APPLE LOCAL end Macintosh alignment 2002-2-26 ff */ #else #define TARGET_ALIGN_NATURAL 0 #endif @@ -728,6 +768,13 @@ extern const char *rs6000_warn_altivec_long_switch; /* Allocation boundary (in *bits*) for the code of a function. */ #define FUNCTION_BOUNDARY 32 +/* Constants for alignment macros below. 
*/ +/* APPLE LOCAL begin Macintosh alignment */ +#define RS6000_DOUBLE_ALIGNMENT 64 +#define RS6000_LONGLONG_ALIGNMENT 64 +#define RS6000_VECTOR_ALIGNMENT 128 +/* APPLE LOCAL end Macintosh alignment */ + /* No data type wants to be aligned rounder than this. */ #define BIGGEST_ALIGNMENT 128 @@ -1047,6 +1094,9 @@ extern const char *rs6000_warn_altivec_long_switch; ((TARGET_SPE && SPE_VECTOR_MODE (MODE)) \ || (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (MODE))) +#define UNITS_PER_SIMD_WORD \ + (TARGET_ALTIVEC ? 16 : (TARGET_SPE ? 8 : 0) ) + /* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. For POWER and PowerPC, the GPRs can hold any mode, but values bigger than one register cannot go past R31. The float @@ -1162,8 +1212,7 @@ extern const char *rs6000_warn_altivec_long_switch; = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1; \ if (DEFAULT_ABI == ABI_DARWIN \ && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) \ - global_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \ - = fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \ + fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \ = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1; \ if (TARGET_ALTIVEC) \ @@ -1528,6 +1577,9 @@ extern enum rs6000_abi rs6000_current_abi; /* available for use by subtarget */ makes the stack pointer a smaller address. */ #define STACK_GROWS_DOWNWARD +/* Offsets recorded in opcodes are a multiple of this alignment factor. */ +#define DWARF_CIE_DATA_ALIGNMENT (-((int) (TARGET_32BIT ? 4 : 8))) + /* Define this if the nominal address of the stack frame is at the high-address end of the local variables; that is, each additional local variable allocated @@ -1717,6 +1769,9 @@ typedef struct machine_function GTY(()) int sysv_varargs_p; /* Flags if __builtin_return_address (n) with n >= 1 was used. 
*/ int ra_needs_full_frame; + /* APPLE LOCAL volatile pic base reg in leaves */ + /* Substitute PIC register in leaf functions */ + int substitute_pic_base_reg; /* Some local-dynamic symbol. */ const char *some_ld_name; /* Whether the instruction chain has been scanned already. */ @@ -2015,7 +2070,6 @@ typedef struct rs6000_args On the RS/6000, all integer constants are acceptable, most won't be valid for particular insns, though. Only easy FP constants are acceptable. */ - #define LEGITIMATE_CONSTANT_P(X) \ (((GET_CODE (X) != CONST_DOUBLE \ && GET_CODE (X) != CONST_VECTOR) \ @@ -2126,7 +2180,8 @@ typedef struct rs6000_args #define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \ do { \ int win; \ - (X) = rs6000_legitimize_reload_address ((X), (MODE), (OPNUM), \ + /* APPLE LOCAL pass reload addr by address */ \ + (X) = rs6000_legitimize_reload_address (&(X), (MODE), (OPNUM), \ (int)(TYPE), (IND_LEVELS), &win); \ if ( win ) \ goto WIN; \ diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md index 29b36d6e4e5..b2b8e967cd9 100644 --- a/gcc/config/rs6000/rs6000.md +++ b/gcc/config/rs6000/rs6000.md @@ -67,7 +67,7 @@ (const_string "integer")) ;; Length (in bytes). -; '(pc)' in the following doesn't include the instruction itself; it is +; '(pc)' in the following doesn't include the instruction itself; it is ; calculated as if the instruction had zero size. 
(define_attr "length" "" (if_then_else (eq_attr "type" "branch") @@ -1632,7 +1632,7 @@ operands[3] = gen_reg_rtx (SImode); operands[4] = gen_reg_rtx (SImode); }) - + (define_expand "ffssi2" [(set (match_dup 2) (neg:SI (match_operand:SI 1 "gpc_reg_operand" "r"))) @@ -1648,7 +1648,7 @@ operands[3] = gen_reg_rtx (SImode); operands[4] = gen_reg_rtx (SImode); }) - + (define_expand "mulsi3" [(use (match_operand:SI 0 "gpc_reg_operand" "")) (use (match_operand:SI 1 "gpc_reg_operand" "")) @@ -1672,10 +1672,10 @@ "@ {muls|mullw} %0,%1,%2 {muli|mulli} %0,%1,%2" - [(set (attr "type") + [(set (attr "type") (cond [(match_operand:SI 2 "s8bit_cint_operand" "") (const_string "imul3") - (match_operand:SI 2 "short_cint_operand" "") + (match_operand:SI 2 "short_cint_operand" "") (const_string "imul2")] (const_string "imul")))]) @@ -1687,10 +1687,10 @@ "@ {muls|mullw} %0,%1,%2 {muli|mulli} %0,%1,%2" - [(set (attr "type") + [(set (attr "type") (cond [(match_operand:SI 2 "s8bit_cint_operand" "") (const_string "imul3") - (match_operand:SI 2 "short_cint_operand" "") + (match_operand:SI 2 "short_cint_operand" "") (const_string "imul2")] (const_string "imul")))]) @@ -2568,7 +2568,7 @@ (const_int 0)))] "") -;; Split a logical operation that we can't do in one insn into two insns, +;; Split a logical operation that we can't do in one insn into two insns, ;; each of which does one 16-bit part. This is used by combine. (define_split @@ -4686,7 +4686,7 @@ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS" [(const_int 0)] " -{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]), +{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]), operands[1], operands[2]); DONE; }") @@ -4879,7 +4879,7 @@ (minus:DF (match_operand:DF 3 "gpc_reg_operand" "f") (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f") (match_operand:DF 2 "gpc_reg_operand" "f"))))] - "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD + "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && ! 
HONOR_SIGNED_ZEROS (DFmode)" "{fnms|fnmsub} %0,%1,%2,%3" [(set_attr "type" "dmul")]) @@ -4892,7 +4892,7 @@ [(set_attr "type" "dsqrt")]) ;; The conditional move instructions allow us to perform max and min -;; operations even when +;; operations even when (define_expand "maxdf3" [(set (match_operand:DF 0 "gpc_reg_operand" "") @@ -4920,7 +4920,7 @@ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS" [(const_int 0)] " -{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]), +{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]), operands[1], operands[2]); DONE; }") @@ -4991,7 +4991,11 @@ { if (TARGET_POWERPC64) { - rtx mem = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0); + /* APPLE LOCAL assign_stack_local_with_alias is used instead of + assign_stack_temp to get better scheduling, at the cost of some + stack space. */ + rtx mem = assign_stack_local_with_alias (DImode, GET_MODE_SIZE (DImode), + GET_MODE_ALIGNMENT (DImode)); rtx t1 = gen_reg_rtx (DImode); rtx t2 = gen_reg_rtx (DImode); emit_insn (gen_floatsidf_ppc64 (operands[0], operands[1], mem, t1, t2)); @@ -5000,7 +5004,11 @@ operands[2] = force_reg (SImode, GEN_INT (0x43300000)); operands[3] = force_reg (DFmode, CONST_DOUBLE_ATOF (\"4503601774854144\", DFmode)); - operands[4] = assign_stack_temp (DFmode, GET_MODE_SIZE (DFmode), 0); + /* APPLE LOCAL assign_stack_local_with_alias is used instead of + assign_stack_temp to get better scheduling, at the cost of some + stack space. 
*/ + operands[4] = assign_stack_local_with_alias (DFmode, GET_MODE_SIZE (DFmode), + GET_MODE_ALIGNMENT (DFmode)); operands[5] = gen_reg_rtx (DFmode); operands[6] = gen_reg_rtx (SImode); }") @@ -5046,7 +5054,7 @@ tmp = highword; highword = lowword; lowword = tmp; } - emit_insn (gen_xorsi3 (operands[6], operands[1], + emit_insn (gen_xorsi3 (operands[6], operands[1], GEN_INT (~ (HOST_WIDE_INT) 0x7fffffff))); emit_move_insn (gen_rtx_MEM (SImode, lowword), operands[6]); emit_move_insn (gen_rtx_MEM (SImode, highword), operands[2]); @@ -5073,7 +5081,11 @@ { if (TARGET_POWERPC64) { - rtx mem = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0); + /* APPLE LOCAL assign_stack_local_with_alias is used instead of + assign_stack_temp to get better scheduling, at the cost of some + stack space. */ + rtx mem = assign_stack_local_with_alias (DImode, GET_MODE_SIZE (DImode), + GET_MODE_ALIGNMENT (DImode)); rtx t1 = gen_reg_rtx (DImode); rtx t2 = gen_reg_rtx (DImode); emit_insn (gen_floatunssidf_ppc64 (operands[0], operands[1], mem, @@ -5083,7 +5095,11 @@ operands[2] = force_reg (SImode, GEN_INT (0x43300000)); operands[3] = force_reg (DFmode, CONST_DOUBLE_ATOF (\"4503599627370496\", DFmode)); - operands[4] = assign_stack_temp (DFmode, GET_MODE_SIZE (DFmode), 0); + /* APPLE LOCAL assign_stack_local_with_alias is used instead of + assign_stack_temp to get better scheduling, at the cost of some + stack space. */ + operands[4] = assign_stack_local_with_alias (DFmode, GET_MODE_SIZE (DFmode), + GET_MODE_ALIGNMENT (DFmode)); operands[5] = gen_reg_rtx (DFmode); }") @@ -5141,7 +5157,11 @@ " { operands[2] = gen_reg_rtx (DImode); - operands[3] = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0); + /* APPLE LOCAL assign_stack_local_with_alias is used instead of + assign_stack_temp to get better scheduling, at the cost of some + stack space. 
*/ + operands[3] = assign_stack_local_with_alias (DImode, GET_MODE_SIZE (DImode), + GET_MODE_ALIGNMENT (DImode)); }") (define_insn "*fix_truncdfsi2_internal" @@ -5642,7 +5662,7 @@ (define_insn "*ashrdisi3_noppc64" [(set (match_operand:SI 0 "gpc_reg_operand" "=r") - (subreg:SI (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r") + (subreg:SI (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r") (const_int 32)) 4))] "TARGET_32BIT && !TARGET_POWERPC64" "* @@ -5652,7 +5672,7 @@ else return \"mr %0,%1\"; }" - [(set_attr "length" "4")]) + [(set_attr "length" "4")]) ;; PowerPC64 DImode operations. @@ -6032,15 +6052,15 @@ (define_expand "ctzdi2" [(set (match_dup 2) (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r"))) - (parallel [(set (match_dup 3) (and:DI (match_dup 1) - (match_dup 2))) + (parallel [(set (match_dup 3) (and:DI (match_dup 1) + (match_dup 2))) (clobber (scratch:CC))]) (set (match_dup 4) (clz:DI (match_dup 3))) (set (match_operand:DI 0 "gpc_reg_operand" "=r") (minus:DI (const_int 63) (match_dup 4)))] "TARGET_POWERPC64" { - operands[2] = gen_reg_rtx (DImode); + operands[2] = gen_reg_rtx (DImode); operands[3] = gen_reg_rtx (DImode); operands[4] = gen_reg_rtx (DImode); }) @@ -6048,15 +6068,15 @@ (define_expand "ffsdi2" [(set (match_dup 2) (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r"))) - (parallel [(set (match_dup 3) (and:DI (match_dup 1) - (match_dup 2))) + (parallel [(set (match_dup 3) (and:DI (match_dup 1) + (match_dup 2))) (clobber (scratch:CC))]) (set (match_dup 4) (clz:DI (match_dup 3))) (set (match_operand:DI 0 "gpc_reg_operand" "=r") (minus:DI (const_int 64) (match_dup 4)))] "TARGET_POWERPC64" { - operands[2] = gen_reg_rtx (DImode); + operands[2] = gen_reg_rtx (DImode); operands[3] = gen_reg_rtx (DImode); operands[4] = gen_reg_rtx (DImode); }) @@ -6656,7 +6676,7 @@ "TARGET_POWERPC64" "sld%I2 %0,%1,%H2" [(set_attr "length" "8")]) - + (define_insn "*ashldi3_internal2" [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y") (compare:CC (ashift:DI 
(match_operand:DI 1 "gpc_reg_operand" "r,r") @@ -6669,7 +6689,7 @@ #" [(set_attr "type" "delayed_compare") (set_attr "length" "4,8")]) - + (define_split [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "") (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "") @@ -7335,7 +7355,7 @@ (const_int 0)))] "") -;; Split a logical operation that we can't do in one insn into two insns, +;; Split a logical operation that we can't do in one insn into two insns, ;; each of which does one 16-bit part. This is used by combine. (define_split @@ -7349,7 +7369,7 @@ " { rtx i3,i4; - + if (GET_CODE (operands[2]) == CONST_DOUBLE) { HOST_WIDE_INT value = CONST_DOUBLE_LOW (operands[2]); @@ -7577,7 +7597,7 @@ ;; Used by sched, shorten_branches and final when the GOT pseudo reg ;; didn't get allocated to a hard register. -(define_split +(define_split [(set (match_operand:SI 0 "gpc_reg_operand" "") (unspec:SI [(match_operand:SI 1 "got_no_const_operand" "") (match_operand:SI 2 "memory_operand" "")] @@ -7640,12 +7660,16 @@ return \"ld %0,lo16(%2)(%1)\"; else { - operands2[3] = gen_rtx_REG (SImode, RS6000_PIC_OFFSET_TABLE_REGNUM); + /* APPLE LOCAL volatile pic base reg in leaves */ + operands2[3] = gen_rtx_REG (SImode, + (cfun->machine->substitute_pic_base_reg == -1 + ? RS6000_PIC_OFFSET_TABLE_REGNUM + : cfun->machine->substitute_pic_base_reg)); output_asm_insn (\"{l|lwz} %0,lo16(%2)(%1)\", operands); #if TARGET_MACHO if (MACHO_DYNAMIC_NO_PIC_P) output_asm_insn (\"{liu|lis} %L0,ha16(%2+4)\", operands); - else + else /* We cannot rely on ha16(low half)==ha16(high half), alas, although in practice it almost always is. 
*/ output_asm_insn (\"{cau|addis} %L0,%3,ha16(%2+4)\", operands2); @@ -8474,7 +8498,7 @@ (define_split [(set (match_operand:DI 0 "nonimmediate_operand" "") (match_operand:DI 1 "input_operand" ""))] - "reload_completed && !TARGET_POWERPC64 + "reload_completed && !TARGET_POWERPC64 && gpr_or_gpr_p (operands[0], operands[1])" [(pc)] { rs6000_split_multireg_move (operands[0], operands[1]); DONE; }) @@ -8634,7 +8658,7 @@ [(set (match_operand:TI 0 "reg_or_mem_operand" "=Q,m,????r,????r,????r") (match_operand:TI 1 "reg_or_mem_operand" "r,r,r,Q,m")) (clobber (match_scratch:SI 2 "=q,q#X,X,X,X"))] - "TARGET_POWER && ! TARGET_POWERPC64 + "TARGET_POWER && ! TARGET_POWERPC64 && (gpc_reg_operand (operands[0], TImode) || gpc_reg_operand (operands[1], TImode))" "* { @@ -8682,7 +8706,7 @@ case 3: /* If the address is not used in the output, we can use lsi. Otherwise, fall through to generating four loads. */ - if (TARGET_STRING + if (TARGET_STRING && ! reg_overlap_mentioned_p (operands[0], operands[1])) return \"{lsi|lswi} %0,%P1,16\"; /* ... fall through ... */ @@ -8698,8 +8722,8 @@ "TARGET_POWERPC64 && (gpc_reg_operand (operands[0], TImode) || gpc_reg_operand (operands[1], TImode))" "@ - # - # + # + # #" [(set_attr "type" "*,load,store")]) @@ -9831,7 +9855,7 @@ if (current_function_limit_stack) { rtx available; - available = expand_binop (Pmode, sub_optab, + available = expand_binop (Pmode, sub_optab, stack_pointer_rtx, stack_limit_rtx, NULL_RTX, 1, OPTAB_WIDEN); emit_insn (gen_cond_trap (LTU, available, operands[1], const0_rtx)); @@ -10277,6 +10301,12 @@ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS) output_asm_insn (\"creqv 6,6,6\", operands); +/* APPLE LOCAL -mlongcall */ +#ifdef RS6000_LONG_BRANCH + if (!flag_pic) + return output_call(insn, operands, 0, 0); + else +#endif return (DEFAULT_ABI == ABI_V4 && flag_pic) ? 
\"bl %z0@local\" : \"bl %z0\"; }" [(set_attr "type" "branch") @@ -10392,7 +10422,7 @@ (match_operand 1 "" "g")) (use (match_operand:SI 2 "immediate_operand" "O")) (clobber (match_scratch:SI 3 "=l"))] - "TARGET_64BIT + "TARGET_64BIT && DEFAULT_ABI == ABI_AIX && (INTVAL (operands[2]) & CALL_LONG) == 0" "bl %z0\;%." @@ -10446,7 +10476,7 @@ (match_operand 2 "" "g"))) (use (match_operand:SI 3 "immediate_operand" "O")) (clobber (match_scratch:SI 4 "=l"))] - "TARGET_64BIT + "TARGET_64BIT && DEFAULT_ABI == ABI_AIX && (INTVAL (operands[3]) & CALL_LONG) == 0" "bl %z1\;%." @@ -10459,11 +10489,12 @@ ;; operands[2] is the value FUNCTION_ARG returns for the VOID argument ;; which indicates how to set cr1 +;; APPLE LOCAL separate cl into c,*l; switch and attr's expanded to match (define_insn "*call_indirect_nonlocal_sysv" - [(call (mem:SI (match_operand:SI 0 "register_operand" "cl,cl")) - (match_operand 1 "" "g,g")) - (use (match_operand:SI 2 "immediate_operand" "O,n")) - (clobber (match_scratch:SI 3 "=l,l"))] + [(call (mem:SI (match_operand:SI 0 "register_operand" "c,*l,c,*l")) + (match_operand 1 "" "g,g,g,g")) + (use (match_operand:SI 2 "immediate_operand" "O,O,n,n")) + (clobber (match_scratch:SI 3 "=l,l,l,l"))] "DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN" { @@ -10475,8 +10506,8 @@ return "b%T0l"; } - [(set_attr "type" "jmpreg,jmpreg") - (set_attr "length" "4,8")]) + [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg") + (set_attr "length" "4,4,8,8")]) (define_insn "*call_nonlocal_sysv" [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "s,s")) @@ -10502,12 +10533,13 @@ [(set_attr "type" "branch,branch") (set_attr "length" "4,8")]) +;; APPLE LOCAL separate cl into c,*l; switch and attr's expanded to match (define_insn "*call_value_indirect_nonlocal_sysv" [(set (match_operand 0 "" "") - (call (mem:SI (match_operand:SI 1 "register_operand" "cl,cl")) - (match_operand 2 "" "g,g"))) - (use (match_operand:SI 3 "immediate_operand" "O,n")) - (clobber (match_scratch:SI 4 
"=l,l"))] + (call (mem:SI (match_operand:SI 1 "register_operand" "c,*l,c,*l")) + (match_operand 2 "" "g,g,g,g"))) + (use (match_operand:SI 3 "immediate_operand" "O,O,n,n")) + (clobber (match_scratch:SI 4 "=l,l,l,l"))] "DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN" { @@ -10519,8 +10551,8 @@ return "b%T1l"; } - [(set_attr "type" "jmpreg,jmpreg") - (set_attr "length" "4,8")]) + [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg") + (set_attr "length" "4,4,8,8")]) (define_insn "*call_value_nonlocal_sysv" [(set (match_operand 0 "" "") @@ -10575,6 +10607,52 @@ DONE; }") +;; APPLE LOCAL sibcall patterns +;; APPLE MERGE modify FSF patterns below instead? +;; this and similar patterns must be marked as using LR, otherwise +;; dataflow will try to delete the store into it. This is true +;; even when the actual reg to jump to is in CTR, when LR was +;; saved and restored around the PIC-setting BCL. +(define_insn "*sibcall_symbolic" + [(call (mem:SI (match_operand:SI 0 "call_operand" "s,c")) + (match_operand 1 "" "")) + (use (match_operand 2 "" "")) + (use (match_scratch:SI 3 "=l,l")) + (return)] + "! TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN" + "* +{ + switch (which_alternative) + { + case 0: return \"b %z0\"; + case 1: return \"b%T0\"; + default: abort(); + } +}" + [(set_attr "type" "branch") + (set_attr "length" "4")]) + +(define_insn "*sibcall_value_symbolic" + [(set (match_operand 0 "" "") + (call (mem:SI (match_operand:SI 1 "call_operand" "s,c")) + (match_operand 2 "" ""))) + (use (match_operand:SI 3 "" "")) + (use (match_scratch:SI 4 "=l,l")) + (return)] + "! 
TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN" + "* +{ + switch (which_alternative) + { + case 0: return \"b %z1\"; + case 1: return \"b%T1\"; + default: abort(); + } +}" + [(set_attr "type" "branch") + (set_attr "length" "4")]) +;; APPLE LOCAL end sibcall patterns + ;; sibling call patterns (define_expand "sibcall" [(parallel [(call (mem:SI (match_operand 0 "address_operand" "")) @@ -10704,7 +10782,7 @@ (use (match_operand:SI 2 "immediate_operand" "O")) (use (match_operand:SI 3 "register_operand" "l")) (return)] - "TARGET_64BIT + "TARGET_64BIT && DEFAULT_ABI == ABI_AIX && (INTVAL (operands[2]) & CALL_LONG) == 0" "b %z0" @@ -10732,7 +10810,7 @@ (use (match_operand:SI 3 "immediate_operand" "O")) (use (match_operand:SI 4 "register_operand" "l")) (return)] - "TARGET_64BIT + "TARGET_64BIT && DEFAULT_ABI == ABI_AIX && (INTVAL (operands[3]) & CALL_LONG) == 0" "b %z1" @@ -11009,11 +11087,11 @@ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))] "" " -{ +{ if (! rs6000_compare_fp_p) FAIL; - rs6000_emit_sCOND (NE, operands[0]); + rs6000_emit_sCOND (NE, operands[0]); DONE; }") @@ -11041,7 +11119,7 @@ && (! TARGET_POWER || rs6000_compare_op1 == const0_rtx)) FAIL; - rs6000_emit_sCOND (GT, operands[0]); + rs6000_emit_sCOND (GT, operands[0]); DONE; }") @@ -11055,7 +11133,7 @@ && (! TARGET_POWER || rs6000_compare_op1 == const0_rtx)) FAIL; - rs6000_emit_sCOND (LE, operands[0]); + rs6000_emit_sCOND (LE, operands[0]); DONE; }") @@ -11065,11 +11143,11 @@ "" " { - if (! rs6000_compare_fp_p + if (! rs6000_compare_fp_p && (! 
TARGET_POWER || rs6000_compare_op1 == const0_rtx)) FAIL; - rs6000_emit_sCOND (LT, operands[0]); + rs6000_emit_sCOND (LT, operands[0]); DONE; }") @@ -13791,7 +13869,7 @@ }") (define_expand "tablejumpdi" - [(set (match_dup 4) + [(set (match_dup 4) (sign_extend:DI (match_operand:SI 0 "lwa_operand" "rm"))) (set (match_dup 3) (plus:DI (match_dup 4) @@ -13900,7 +13978,7 @@ (const_int 1)) (label_ref (match_operand 0 "" "")) (pc))) - (set (match_operand:SI 2 "register_operand" "=1,*r,m,*q*c*l") + (set (match_operand:SI 2 "nonimmediate_operand" "=1,*r,m,*q*c*l") (plus:SI (match_dup 1) (const_int -1))) (clobber (match_scratch:CC 3 "=X,&x,&x,&x")) @@ -13924,7 +14002,7 @@ (const_int 1)) (pc) (label_ref (match_operand 0 "" "")))) - (set (match_operand:SI 2 "register_operand" "=1,*r,m,*q*c*l") + (set (match_operand:SI 2 "nonimmediate_operand" "=1,*r,m,*q*c*l") (plus:SI (match_dup 1) (const_int -1))) (clobber (match_scratch:CC 3 "=X,&x,&x,&x")) @@ -13948,7 +14026,7 @@ (const_int 1)) (label_ref (match_operand 0 "" "")) (pc))) - (set (match_operand:DI 2 "register_operand" "=1,*r,m,*c*l") + (set (match_operand:DI 2 "nonimmediate_operand" "=1,*r,m,*c*l") (plus:DI (match_dup 1) (const_int -1))) (clobber (match_scratch:CC 3 "=X,&x,&x,&x")) @@ -13972,7 +14050,7 @@ (const_int 1)) (pc) (label_ref (match_operand 0 "" "")))) - (set (match_operand:DI 2 "register_operand" "=1,*r,m,*c*l") + (set (match_operand:DI 2 "nonimmediate_operand" "=1,*r,m,*c*l") (plus:DI (match_dup 1) (const_int -1))) (clobber (match_scratch:CC 3 "=X,&x,&x,&x")) @@ -13998,7 +14076,7 @@ (const_int 0)) (label_ref (match_operand 0 "" "")) (pc))) - (set (match_operand:SI 2 "register_operand" "=1,*r,m,*q*c*l") + (set (match_operand:SI 2 "nonimmediate_operand" "=1,*r,m,*q*c*l") (plus:SI (match_dup 1) (const_int -1))) (clobber (match_scratch:CC 3 "=X,&x,&x,&x")) @@ -14022,7 +14100,7 @@ (const_int 0)) (pc) (label_ref (match_operand 0 "" "")))) - (set (match_operand:SI 2 "register_operand" "=1,*r,m,*q*c*l") + (set 
(match_operand:SI 2 "nonimmediate_operand" "=1,*r,m,*q*c*l") (plus:SI (match_dup 1) (const_int -1))) (clobber (match_scratch:CC 3 "=X,&x,&x,&x")) @@ -14046,7 +14124,7 @@ (const_int 0)) (label_ref (match_operand 0 "" "")) (pc))) - (set (match_operand:DI 2 "register_operand" "=1,*r,m,*c*l") + (set (match_operand:DI 2 "nonimmediate_operand" "=1,*r,m,*c*l") (plus:DI (match_dup 1) (const_int -1))) (clobber (match_scratch:CC 3 "=X,&x,&x,&x")) @@ -14070,7 +14148,7 @@ (const_int 0)) (pc) (label_ref (match_operand 0 "" "")))) - (set (match_operand:DI 2 "register_operand" "=1,*r,m,*c*l") + (set (match_operand:DI 2 "nonimmediate_operand" "=1,*r,m,*c*l") (plus:DI (match_dup 1) (const_int -1))) (clobber (match_scratch:CC 3 "=X,&x,&x,&x")) @@ -14096,7 +14174,7 @@ (const_int 1)) (label_ref (match_operand 0 "" "")) (pc))) - (set (match_operand:SI 2 "register_operand" "=1,*r,m,*q*c*l") + (set (match_operand:SI 2 "nonimmediate_operand" "=1,*r,m,*q*c*l") (plus:SI (match_dup 1) (const_int -1))) (clobber (match_scratch:CC 3 "=X,&x,&x,&x")) @@ -14120,7 +14198,7 @@ (const_int 1)) (pc) (label_ref (match_operand 0 "" "")))) - (set (match_operand:SI 2 "register_operand" "=1,*r,m,*q*c*l") + (set (match_operand:SI 2 "nonimmediate_operand" "=1,*r,m,*q*c*l") (plus:SI (match_dup 1) (const_int -1))) (clobber (match_scratch:CC 3 "=X,&x,&x,&x")) @@ -14144,7 +14222,7 @@ (const_int 1)) (label_ref (match_operand 0 "" "")) (pc))) - (set (match_operand:DI 2 "register_operand" "=1,*r,m,*c*l") + (set (match_operand:DI 2 "nonimmediate_operand" "=1,*r,m,*c*l") (plus:DI (match_dup 1) (const_int -1))) (clobber (match_scratch:CC 3 "=X,&x,&x,&x")) @@ -14168,7 +14246,7 @@ (const_int 1)) (pc) (label_ref (match_operand 0 "" "")))) - (set (match_operand:DI 2 "register_operand" "=1,*r,m,*c*l") + (set (match_operand:DI 2 "nonimmediate_operand" "=1,*r,m,*c*l") (plus:DI (match_dup 1) (const_int -1))) (clobber (match_scratch:CC 3 "=X,&x,&x,&x")) @@ -14363,7 +14441,7 @@ (define_insn "movesi_from_cr" [(set 
(match_operand:SI 0 "gpc_reg_operand" "=r") - (unspec:SI [(reg:CC 68) (reg:CC 69) (reg:CC 70) (reg:CC 71) + (unspec:SI [(reg:CC 68) (reg:CC 69) (reg:CC 70) (reg:CC 71) (reg:CC 72) (reg:CC 73) (reg:CC 74) (reg:CC 75)] UNSPEC_MOVESI_FROM_CR))] "" @@ -14376,7 +14454,7 @@ (match_operand:SI 2 "gpc_reg_operand" "r"))])] "TARGET_MULTIPLE" "{stm|stmw} %2,%1") - + (define_insn "*save_fpregs_si" [(match_parallel 0 "any_operand" [(clobber (match_operand:SI 1 "register_operand" "=l")) @@ -14388,6 +14466,35 @@ [(set_attr "type" "branch") (set_attr "length" "4")]) +/* APPLE LOCAL begin unnamed*/ +(define_insn "*save_fpregs_with_label_si" + [(match_parallel 0 "any_operand" + [(clobber (match_operand:SI 1 "register_operand" "=l")) + (use (match_operand:SI 2 "call_operand" "s")) + (use (match_operand:SI 3 "" "")) + (set (match_operand:DF 4 "memory_operand" "=m") + (match_operand:DF 5 "gpc_reg_operand" "f"))])] + "TARGET_32BIT" + "* +#if TARGET_MACHO + const char *picbase = machopic_function_base_name (); + char *tmp; + operands[3] = gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (picbase, -1)); + if (TARGET_LONG_BRANCH) + { + tmp = ggc_alloc (strlen (XSTR (operands[2], 0)) + strlen (XSTR (operands[3], 0)) + 2); + strcpy (tmp, output_call(insn, operands, 2, 2)); + strcat (tmp, \"\\n%3:\"); + return tmp; + } + else +#endif + return \"bl %z2\\n%3:\"; +" + [(set_attr "type" "branch") + (set_attr "length" "4")]) +/* APPLE LOCAL end unnamed */ + (define_insn "*save_fpregs_di" [(match_parallel 0 "any_operand" [(clobber (match_operand:DI 1 "register_operand" "=l")) @@ -14452,7 +14559,7 @@ (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r") (match_operand 2 "immediate_operand" "n")] UNSPEC_MOVESI_TO_CR))] - "GET_CODE (operands[0]) == REG + "GET_CODE (operands[0]) == REG && CR_REGNO_P (REGNO (operands[0])) && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 1 << (75 - REGNO (operands[0]))" @@ -14469,7 +14576,7 @@ (match_operand:SI 2 "memory_operand" "m"))])] "TARGET_MULTIPLE" 
"{lm|lmw} %1,%2") - + (define_insn "*return_internal_si" [(return) (use (match_operand:SI 0 "register_operand" "lc"))] @@ -14495,7 +14602,14 @@ (set (match_operand:DF 3 "gpc_reg_operand" "=f") (match_operand:DF 4 "memory_operand" "m"))])] "TARGET_32BIT" - "b %z2") + { +#if TARGET_MACHO + if (TARGET_LONG_BRANCH) + return output_call(insn, operands, 2, 2); + else +#endif + return "b %z2"; + }) (define_insn "*return_and_restore_fpregs_di" [(match_parallel 0 "any_operand" @@ -14505,7 +14619,14 @@ (set (match_operand:DF 3 "gpc_reg_operand" "=f") (match_operand:DF 4 "memory_operand" "m"))])] "TARGET_64BIT" - "b %z2") + { +#if TARGET_MACHO + if (TARGET_LONG_BRANCH) + return output_call(insn, operands, 2, 2); + else +#endif + return "b %z2"; + }) ; This is used in compiling the unwind routines. (define_expand "eh_return" diff --git a/gcc/config/rs6000/sysv4.h b/gcc/config/rs6000/sysv4.h index 1e0ac3707f6..88bf8196b17 100644 --- a/gcc/config/rs6000/sysv4.h +++ b/gcc/config/rs6000/sysv4.h @@ -434,6 +434,13 @@ do { \ #define BSS_SECTION_ASM_OP "\t.section\t\".bss\"" +/* APPLE LOCAL begin hot/cold partitioning */ +#define HOT_TEXT_SECTION_NAME ".text" +#define NORMAL_TEXT_SECTION_NAME ".text" +#define UNLIKELY_EXECUTED_TEXT_SECTION_NAME ".text.unlikely" +#define SECTION_FORMAT_STRING ".section\t\"%s\"\n\t.align 2\n" +/* APPLE LOCAL end hot/cold partitioning */ + /* Override elfos.h definition. */ #undef INIT_SECTION_ASM_OP #define INIT_SECTION_ASM_OP "\t.section\t\".init\",\"ax\"" diff --git a/gcc/config/rs6000/t-darwin b/gcc/config/rs6000/t-darwin index 185bb00eed2..af710ed7cef 100644 --- a/gcc/config/rs6000/t-darwin +++ b/gcc/config/rs6000/t-darwin @@ -1,7 +1,11 @@ +# APPLE LOCAL begin AltiVec # Add trampoline and long double support to libgcc. 
LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-tramp.asm \ - $(srcdir)/config/rs6000/darwin-ldouble.c + $(srcdir)/config/rs6000/darwin-fpsave.asm \ + $(srcdir)/config/rs6000/darwin-ldouble.c +# Enable AltiVec instructions when assembling the aforementioned .asm files. # For libgcc, we always want 128-bit long double, since a libgcc built with # that will work without it. -TARGET_LIBGCC2_CFLAGS = -mlong-double-128 +TARGET_LIBGCC2_CFLAGS = -mlong-double-128 -Wa,-force_cpusubtype_ALL +# APPLE LOCAL end AltiVec diff --git a/gcc/config/rs6000/t-linux64 b/gcc/config/rs6000/t-linux64 index 0e86f5f26ff..e8389a0d995 100644 --- a/gcc/config/rs6000/t-linux64 +++ b/gcc/config/rs6000/t-linux64 @@ -37,5 +37,9 @@ fp-bit32.c: $(srcdir)/config/fp-bit.c mklibgcc: bispecs bispecs: specs - sed -e '/cc1_options/{ n; s/$$/ %{!m32:-mlong-double-128}/; }' < specs > $@ + if [ x`$(GCC_FOR_TARGET) -print-multi-os-directory` = x../lib ]; then \ + sed -e '/cc1_options/{ n; s/$$/ %{m64:-mlong-double-128}/; }' < specs > $@; \ + else \ + sed -e '/cc1_options/{ n; s/$$/ %{!m32:-mlong-double-128}/; }' < specs > $@; \ + fi diff --git a/gcc/config/rs6000/t-rs6000 b/gcc/config/rs6000/t-rs6000 index 9546461e57d..9cc60036c00 100644 --- a/gcc/config/rs6000/t-rs6000 +++ b/gcc/config/rs6000/t-rs6000 @@ -18,6 +18,7 @@ rs6000-c.o: $(srcdir)/config/rs6000/rs6000-c.c \ # The rs6000 backend doesn't cause warnings in these files. insn-conditions.o-warn = + # The files below trigger warnings in tree-ssa because of the gimplifier # emitting code that confuse the compiler into thinking that some variables # are used uninitialized. diff --git a/gcc/config/rs6000/t-rtems b/gcc/config/rs6000/t-rtems deleted file mode 100644 index 364a22d2278..00000000000 --- a/gcc/config/rs6000/t-rtems +++ /dev/null @@ -1,86 +0,0 @@ -# Multilibs for powerpc RTEMS targets. 
- -MULTILIB_OPTIONS = \ -mcpu=403/mcpu=505/mcpu=601/mcpu=602/mcpu=603/mcpu=603e/mcpu=604/mcpu=750/mcpu=821/mcpu=860 \ -Dmpc509/Dmpc8260 \ -D_OLD_EXCEPTIONS \ -msoft-float - -MULTILIB_DIRNAMES = \ -m403 m505 m601 m602 m603 m603e m604 m750 m821 m860 \ -mpc509 \ -mpc8260 \ -roe \ -nof - -MULTILIB_EXTRA_OPTS = mrelocatable-lib mno-eabi mstrict-align - -# MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT} -MULTILIB_MATCHES = ${MULTILIB_MATCHES_ENDIAN} \ - ${MULTILIB_MATCHES_SYSV} \ - mcpu?505/Dmpc505=mcpu?505/Dmpc509 - -# -# RTEMS old/new-exceptions handling -# -# old-exception processing is depredicated, therefore -# -# * Cpu-variants supporting new exception processing are build -# with new exception processing only -# * Cpu-variants not having been ported to new exception processing are -# build with old and new exception processing -# - -# Cpu-variants supporting new exception processing only -MULTILIB_NEW_EXCEPTIONS_ONLY = \ -*mcpu=604*/*D_OLD_EXCEPTIONS* \ -*mcpu=750*/*D_OLD_EXCEPTIONS* \ -*mcpu=821*/*D_OLD_EXCEPTIONS* \ -*Dmpc8260*/*D_OLD_EXCEPTIONS* \ -*mcpu=860*/*D_OLD_EXCEPTIONS* - -# Soft-float only, default implies msoft-float -# NOTE: Must match with MULTILIB_MATCHES_FLOAT and MULTILIB_MATCHES -MULTILIB_SOFTFLOAT_ONLY = \ -mcpu=403/*msoft-float* \ -mcpu=821/*msoft-float* \ -mcpu=860/*msoft-float* - -# Hard-float only, take out msoft-float -MULTILIB_HARDFLOAT_ONLY = \ -mcpu=505/*msoft-float* - -MULTILIB_EXCEPTIONS = - -# Disallow -D_OLD_EXCEPTIONS without other options -MULTILIB_EXCEPTIONS += D_OLD_EXCEPTIONS* - -# Disallow -Dppc and -Dmpc without other options -MULTILIB_EXCEPTIONS += Dppc* Dmpc* - -MULTILIB_EXCEPTIONS += \ -${MULTILIB_NEW_EXCEPTIONS_ONLY} \ -${MULTILIB_SOFTFLOAT_ONLY} \ -${MULTILIB_HARDFLOAT_ONLY} - -# Special rules -# Take out all variants we don't want -MULTILIB_EXCEPTIONS += mcpu=403/Dmpc509* -MULTILIB_EXCEPTIONS += mcpu=403/Dmpc8260* -MULTILIB_EXCEPTIONS += mcpu=505/Dmpc509* -MULTILIB_EXCEPTIONS += mcpu=505/Dmpc8260* -MULTILIB_EXCEPTIONS += 
mcpu=601/Dmpc509* -MULTILIB_EXCEPTIONS += mcpu=601/Dmpc8260* -MULTILIB_EXCEPTIONS += mcpu=602/Dmpc509* -MULTILIB_EXCEPTIONS += mcpu=602/Dmpc8260* -MULTILIB_EXCEPTIONS += mcpu=603/Dmpc509* -MULTILIB_EXCEPTIONS += mcpu=603/Dmpc8260* -MULTILIB_EXCEPTIONS += mcpu=603e/Dmpc509* -MULTILIB_EXCEPTIONS += mcpu=604/Dmpc509* -MULTILIB_EXCEPTIONS += mcpu=604/Dmpc8260* -MULTILIB_EXCEPTIONS += mcpu=750/Dmpc509* -MULTILIB_EXCEPTIONS += mcpu=750/Dmpc8260* -MULTILIB_EXCEPTIONS += mcpu=821/Dmpc509* -MULTILIB_EXCEPTIONS += mcpu=821/Dmpc8260* -MULTILIB_EXCEPTIONS += mcpu=860/Dmpc509* -MULTILIB_EXCEPTIONS += mcpu=860/Dmpc8260* diff --git a/gcc/config/rs6000/vec.h b/gcc/config/rs6000/vec.h new file mode 100644 index 00000000000..56e8786f25b --- /dev/null +++ b/gcc/config/rs6000/vec.h @@ -0,0 +1,4515 @@ +/* APPLE LOCAL file AltiVec */ +/* This file is generated by ops-to-gp. Do not edit. */ + +/* To regenerate execute: + ops-to-gp -gcc vec.ops builtin.ops + with the current directory being gcc/config/rs6000. */ + +static const struct builtin B1_vec_abs = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 11, "vec_abs:1", "4", CODE_FOR_xfx_perm, B_UID(0) }; +static const struct builtin B2_vec_abs = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 11, "vec_abs:2", "2", CODE_FOR_xfx_perm, B_UID(1) }; +static const struct builtin B3_vec_abs = { { &T_vec_s32, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 11, "vec_abs:3", "3", CODE_FOR_xfx_perm, B_UID(2) }; +static const struct builtin B4_vec_abs = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s8, 1, FALSE, FALSE, 11, "vec_abs:4", "1", CODE_FOR_xfx_perm, B_UID(3) }; +static const struct builtin B1_vec_abss = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 11, "vec_abss:1", "6", CODE_FOR_xfx_perm, B_UID(4) }; +static const struct builtin B2_vec_abss = { { &T_vec_s32, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 11, "vec_abss:2", "7", CODE_FOR_xfx_perm, B_UID(5) }; +static const struct builtin 
B3_vec_abss = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s8, 1, FALSE, FALSE, 11, "vec_abss:3", "5", CODE_FOR_xfx_perm, B_UID(6) }; +static const struct builtin B1_vec_vadduhm = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vadduhm:1", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(7) }; +static const struct builtin B2_vec_vadduhm = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhm:2", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(8) }; +static const struct builtin B1_vec_vadduwm = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vadduwm:1", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(9) }; +static const struct builtin B2_vec_vadduwm = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduwm:2", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(10) }; +static const struct builtin B1_vec_vaddubm = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddubm:1", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(11) }; +static const struct builtin B2_vec_vaddubm = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubm:2", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(12) }; +static const struct builtin B_vec_vaddfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vaddfp", "*vaddfp", CODE_FOR_xfxx_fp, B_UID(13) }; +static const struct builtin B3_vec_vadduhm = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vadduhm:3", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(14) }; +static const struct builtin B4_vec_vadduhm = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vadduhm:4", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(15) }; +static const struct builtin B3_vec_vadduwm = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vadduwm:3", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(16) }; +static const struct builtin 
B4_vec_vadduwm = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vadduwm:4", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(17) }; +static const struct builtin B3_vec_vaddubm = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddubm:3", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(18) }; +static const struct builtin B4_vec_vaddubm = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddubm:4", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(19) }; +static const struct builtin B5_vec_vadduhm = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhm:5", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(20) }; +static const struct builtin B6_vec_vadduhm = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhm:6", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(21) }; +static const struct builtin B5_vec_vadduwm = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduwm:5", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(22) }; +static const struct builtin B6_vec_vadduwm = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduwm:6", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(23) }; +static const struct builtin B5_vec_vaddubm = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubm:5", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(24) }; +static const struct builtin B6_vec_vaddubm = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubm:6", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(25) }; +static const struct builtin B_vec_vaddcuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vaddcuw", "*vaddcuw", CODE_FOR_xfxx_simple, B_UID(26) }; +static const struct builtin B1_vec_vaddshs = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vaddshs:1", "*vaddshs", CODE_FOR_xfxx_simple, B_UID(27) }; +static 
const struct builtin B1_vec_vadduhs = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhs:1", "*vadduhs", CODE_FOR_xfxx_simple, B_UID(28) }; +static const struct builtin B1_vec_vaddsws = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vaddsws:1", "*vaddsws", CODE_FOR_xfxx_simple, B_UID(29) }; +static const struct builtin B1_vec_vadduws = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduws:1", "*vadduws", CODE_FOR_xfxx_simple, B_UID(30) }; +static const struct builtin B1_vec_vaddsbs = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddsbs:1", "*vaddsbs", CODE_FOR_xfxx_simple, B_UID(31) }; +static const struct builtin B1_vec_vaddubs = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubs:1", "*vaddubs", CODE_FOR_xfxx_simple, B_UID(32) }; +static const struct builtin B2_vec_vaddshs = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vaddshs:2", "*vaddshs", CODE_FOR_xfxx_simple, B_UID(33) }; +static const struct builtin B3_vec_vaddshs = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vaddshs:3", "*vaddshs", CODE_FOR_xfxx_simple, B_UID(34) }; +static const struct builtin B2_vec_vaddsws = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vaddsws:2", "*vaddsws", CODE_FOR_xfxx_simple, B_UID(35) }; +static const struct builtin B3_vec_vaddsws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vaddsws:3", "*vaddsws", CODE_FOR_xfxx_simple, B_UID(36) }; +static const struct builtin B2_vec_vaddsbs = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddsbs:2", "*vaddsbs", CODE_FOR_xfxx_simple, B_UID(37) }; +static const struct builtin B3_vec_vaddsbs = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddsbs:3", "*vaddsbs", 
CODE_FOR_xfxx_simple, B_UID(38) }; +static const struct builtin B2_vec_vadduhs = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhs:2", "*vadduhs", CODE_FOR_xfxx_simple, B_UID(39) }; +static const struct builtin B3_vec_vadduhs = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhs:3", "*vadduhs", CODE_FOR_xfxx_simple, B_UID(40) }; +static const struct builtin B2_vec_vadduws = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduws:2", "*vadduws", CODE_FOR_xfxx_simple, B_UID(41) }; +static const struct builtin B3_vec_vadduws = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduws:3", "*vadduws", CODE_FOR_xfxx_simple, B_UID(42) }; +static const struct builtin B2_vec_vaddubs = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubs:2", "*vaddubs", CODE_FOR_xfxx_simple, B_UID(43) }; +static const struct builtin B3_vec_vaddubs = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubs:3", "*vaddubs", CODE_FOR_xfxx_simple, B_UID(44) }; +static const struct builtin B1_vec_all_eq = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:1", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(45) }; +static const struct builtin B2_vec_all_eq = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:2", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(46) }; +static const struct builtin B3_vec_all_eq = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:3", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(47) }; +static const struct builtin B4_vec_all_eq = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:4", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(48) }; +static const struct builtin B5_vec_all_eq = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, 
FALSE, FALSE, 0, "vec_all_eq:5", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(49) }; +static const struct builtin B6_vec_all_eq = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:6", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(50) }; +static const struct builtin B7_vec_all_eq = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:7", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(51) }; +static const struct builtin B8_vec_all_eq = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:8", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(52) }; +static const struct builtin B9_vec_all_eq = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:9", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(53) }; +static const struct builtin B10_vec_all_eq = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:10", "*vcmpeqfp.", CODE_FOR_j_24_t_fxx_simple, B_UID(54) }; +static const struct builtin B11_vec_all_eq = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:11", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(55) }; +static const struct builtin B12_vec_all_eq = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:12", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(56) }; +static const struct builtin B13_vec_all_eq = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:13", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(57) }; +static const struct builtin B14_vec_all_eq = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:14", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(58) }; +static const struct builtin B15_vec_all_eq = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:15", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(59) }; +static const 
struct builtin B16_vec_all_eq = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:16", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(60) }; +static const struct builtin B17_vec_all_eq = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:17", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(61) }; +static const struct builtin B18_vec_all_eq = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:18", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(62) }; +static const struct builtin B19_vec_all_eq = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:19", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(63) }; +static const struct builtin B20_vec_all_eq = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:20", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(64) }; +static const struct builtin B21_vec_all_eq = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:21", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(65) }; +static const struct builtin B22_vec_all_eq = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:22", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(66) }; +static const struct builtin B23_vec_all_eq = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:23", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(67) }; +static const struct builtin B1_vec_all_ge = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:1", "*vcmpgtsh.", CODE_FOR_j_26_t_frxx_simple, B_UID(68) }; +static const struct builtin B2_vec_all_ge = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:2", "*vcmpgtuh.", CODE_FOR_j_26_t_frxx_simple, B_UID(69) }; +static const struct builtin B3_vec_all_ge = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26tr, 2, FALSE, 
FALSE, 0, "vec_all_ge:3", "*vcmpgtsw.", CODE_FOR_j_26_t_frxx_simple, B_UID(70) }; +static const struct builtin B4_vec_all_ge = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:4", "*vcmpgtuw.", CODE_FOR_j_26_t_frxx_simple, B_UID(71) }; +static const struct builtin B5_vec_all_ge = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:5", "*vcmpgtsb.", CODE_FOR_j_26_t_frxx_simple, B_UID(72) }; +static const struct builtin B6_vec_all_ge = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:6", "*vcmpgtub.", CODE_FOR_j_26_t_frxx_simple, B_UID(73) }; +static const struct builtin B7_vec_all_ge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_ge:7", "*vcmpgefp.", CODE_FOR_j_24_t_fxx_simple, B_UID(74) }; +static const struct builtin B8_vec_all_ge = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:8", "*vcmpgtsh.", CODE_FOR_j_26_t_frxx_simple, B_UID(75) }; +static const struct builtin B9_vec_all_ge = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:9", "*vcmpgtsh.", CODE_FOR_j_26_t_frxx_simple, B_UID(76) }; +static const struct builtin B10_vec_all_ge = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:10", "*vcmpgtsw.", CODE_FOR_j_26_t_frxx_simple, B_UID(77) }; +static const struct builtin B11_vec_all_ge = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:11", "*vcmpgtsw.", CODE_FOR_j_26_t_frxx_simple, B_UID(78) }; +static const struct builtin B12_vec_all_ge = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:12", "*vcmpgtsb.", CODE_FOR_j_26_t_frxx_simple, B_UID(79) }; +static const struct builtin B13_vec_all_ge = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:13", "*vcmpgtsb.", CODE_FOR_j_26_t_frxx_simple, B_UID(80) }; +static 
const struct builtin B14_vec_all_ge = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:14", "*vcmpgtuh.", CODE_FOR_j_26_t_frxx_simple, B_UID(81) }; +static const struct builtin B15_vec_all_ge = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:15", "*vcmpgtuh.", CODE_FOR_j_26_t_frxx_simple, B_UID(82) }; +static const struct builtin B16_vec_all_ge = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:16", "*vcmpgtuw.", CODE_FOR_j_26_t_frxx_simple, B_UID(83) }; +static const struct builtin B17_vec_all_ge = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:17", "*vcmpgtuw.", CODE_FOR_j_26_t_frxx_simple, B_UID(84) }; +static const struct builtin B18_vec_all_ge = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:18", "*vcmpgtub.", CODE_FOR_j_26_t_frxx_simple, B_UID(85) }; +static const struct builtin B19_vec_all_ge = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:19", "*vcmpgtub.", CODE_FOR_j_26_t_frxx_simple, B_UID(86) }; +static const struct builtin B1_vec_all_gt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:1", "*vcmpgtsh.", CODE_FOR_j_24_t_fxx_simple, B_UID(87) }; +static const struct builtin B2_vec_all_gt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:2", "*vcmpgtuh.", CODE_FOR_j_24_t_fxx_simple, B_UID(88) }; +static const struct builtin B3_vec_all_gt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:3", "*vcmpgtsw.", CODE_FOR_j_24_t_fxx_simple, B_UID(89) }; +static const struct builtin B4_vec_all_gt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:4", "*vcmpgtuw.", CODE_FOR_j_24_t_fxx_simple, B_UID(90) }; +static const struct builtin B5_vec_all_gt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 
2, FALSE, FALSE, 0, "vec_all_gt:5", "*vcmpgtsb.", CODE_FOR_j_24_t_fxx_simple, B_UID(91) }; +static const struct builtin B6_vec_all_gt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:6", "*vcmpgtub.", CODE_FOR_j_24_t_fxx_simple, B_UID(92) }; +static const struct builtin B7_vec_all_gt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:7", "*vcmpgtfp.", CODE_FOR_j_24_t_fxx_simple, B_UID(93) }; +static const struct builtin B8_vec_all_gt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:8", "*vcmpgtsh.", CODE_FOR_j_24_t_fxx_simple, B_UID(94) }; +static const struct builtin B9_vec_all_gt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:9", "*vcmpgtsh.", CODE_FOR_j_24_t_fxx_simple, B_UID(95) }; +static const struct builtin B10_vec_all_gt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:10", "*vcmpgtsw.", CODE_FOR_j_24_t_fxx_simple, B_UID(96) }; +static const struct builtin B11_vec_all_gt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:11", "*vcmpgtsw.", CODE_FOR_j_24_t_fxx_simple, B_UID(97) }; +static const struct builtin B12_vec_all_gt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:12", "*vcmpgtsb.", CODE_FOR_j_24_t_fxx_simple, B_UID(98) }; +static const struct builtin B13_vec_all_gt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:13", "*vcmpgtsb.", CODE_FOR_j_24_t_fxx_simple, B_UID(99) }; +static const struct builtin B14_vec_all_gt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:14", "*vcmpgtuh.", CODE_FOR_j_24_t_fxx_simple, B_UID(100) }; +static const struct builtin B15_vec_all_gt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:15", "*vcmpgtuh.", CODE_FOR_j_24_t_fxx_simple, B_UID(101) }; +static 
const struct builtin B16_vec_all_gt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:16", "*vcmpgtuw.", CODE_FOR_j_24_t_fxx_simple, B_UID(102) }; +static const struct builtin B17_vec_all_gt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:17", "*vcmpgtuw.", CODE_FOR_j_24_t_fxx_simple, B_UID(103) }; +static const struct builtin B18_vec_all_gt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:18", "*vcmpgtub.", CODE_FOR_j_24_t_fxx_simple, B_UID(104) }; +static const struct builtin B19_vec_all_gt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:19", "*vcmpgtub.", CODE_FOR_j_24_t_fxx_simple, B_UID(105) }; +static const struct builtin B_vec_all_in = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_in", "*vcmpbfp.", CODE_FOR_j_26_t_fxx_simple, B_UID(106) }; +static const struct builtin B1_vec_all_le = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:1", "*vcmpgtsh.", CODE_FOR_j_26_t_fxx_simple, B_UID(107) }; +static const struct builtin B2_vec_all_le = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:2", "*vcmpgtuh.", CODE_FOR_j_26_t_fxx_simple, B_UID(108) }; +static const struct builtin B3_vec_all_le = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:3", "*vcmpgtsw.", CODE_FOR_j_26_t_fxx_simple, B_UID(109) }; +static const struct builtin B4_vec_all_le = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:4", "*vcmpgtuw.", CODE_FOR_j_26_t_fxx_simple, B_UID(110) }; +static const struct builtin B5_vec_all_le = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:5", "*vcmpgtsb.", CODE_FOR_j_26_t_fxx_simple, B_UID(111) }; +static const struct builtin B6_vec_all_le = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, 
FALSE, 0, "vec_all_le:6", "*vcmpgtub.", CODE_FOR_j_26_t_fxx_simple, B_UID(112) }; +static const struct builtin B7_vec_all_le = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_le:7", "*vcmpgefp.", CODE_FOR_j_24_t_frxx_simple, B_UID(113) }; +static const struct builtin B8_vec_all_le = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:8", "*vcmpgtsh.", CODE_FOR_j_26_t_fxx_simple, B_UID(114) }; +static const struct builtin B9_vec_all_le = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:9", "*vcmpgtsh.", CODE_FOR_j_26_t_fxx_simple, B_UID(115) }; +static const struct builtin B10_vec_all_le = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:10", "*vcmpgtsw.", CODE_FOR_j_26_t_fxx_simple, B_UID(116) }; +static const struct builtin B11_vec_all_le = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:11", "*vcmpgtsw.", CODE_FOR_j_26_t_fxx_simple, B_UID(117) }; +static const struct builtin B12_vec_all_le = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:12", "*vcmpgtsb.", CODE_FOR_j_26_t_fxx_simple, B_UID(118) }; +static const struct builtin B13_vec_all_le = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:13", "*vcmpgtsb.", CODE_FOR_j_26_t_fxx_simple, B_UID(119) }; +static const struct builtin B14_vec_all_le = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:14", "*vcmpgtuh.", CODE_FOR_j_26_t_fxx_simple, B_UID(120) }; +static const struct builtin B15_vec_all_le = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:15", "*vcmpgtuh.", CODE_FOR_j_26_t_fxx_simple, B_UID(121) }; +static const struct builtin B16_vec_all_le = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:16", "*vcmpgtuw.", CODE_FOR_j_26_t_fxx_simple, B_UID(122) }; 
+static const struct builtin B17_vec_all_le = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:17", "*vcmpgtuw.", CODE_FOR_j_26_t_fxx_simple, B_UID(123) }; +static const struct builtin B18_vec_all_le = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:18", "*vcmpgtub.", CODE_FOR_j_26_t_fxx_simple, B_UID(124) }; +static const struct builtin B19_vec_all_le = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:19", "*vcmpgtub.", CODE_FOR_j_26_t_fxx_simple, B_UID(125) }; +static const struct builtin B1_vec_all_lt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:1", "*vcmpgtsh.", CODE_FOR_j_24_t_frxx_simple, B_UID(126) }; +static const struct builtin B2_vec_all_lt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:2", "*vcmpgtuh.", CODE_FOR_j_24_t_frxx_simple, B_UID(127) }; +static const struct builtin B3_vec_all_lt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:3", "*vcmpgtsw.", CODE_FOR_j_24_t_frxx_simple, B_UID(128) }; +static const struct builtin B4_vec_all_lt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:4", "*vcmpgtuw.", CODE_FOR_j_24_t_frxx_simple, B_UID(129) }; +static const struct builtin B5_vec_all_lt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:5", "*vcmpgtsb.", CODE_FOR_j_24_t_frxx_simple, B_UID(130) }; +static const struct builtin B6_vec_all_lt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:6", "*vcmpgtub.", CODE_FOR_j_24_t_frxx_simple, B_UID(131) }; +static const struct builtin B7_vec_all_lt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:7", "*vcmpgtfp.", CODE_FOR_j_24_t_frxx_simple, B_UID(132) }; +static const struct builtin B8_vec_all_lt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", 
&T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:8", "*vcmpgtsh.", CODE_FOR_j_24_t_frxx_simple, B_UID(133) }; +static const struct builtin B9_vec_all_lt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:9", "*vcmpgtsh.", CODE_FOR_j_24_t_frxx_simple, B_UID(134) }; +static const struct builtin B10_vec_all_lt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:10", "*vcmpgtsw.", CODE_FOR_j_24_t_frxx_simple, B_UID(135) }; +static const struct builtin B11_vec_all_lt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:11", "*vcmpgtsw.", CODE_FOR_j_24_t_frxx_simple, B_UID(136) }; +static const struct builtin B12_vec_all_lt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:12", "*vcmpgtsb.", CODE_FOR_j_24_t_frxx_simple, B_UID(137) }; +static const struct builtin B13_vec_all_lt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:13", "*vcmpgtsb.", CODE_FOR_j_24_t_frxx_simple, B_UID(138) }; +static const struct builtin B14_vec_all_lt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:14", "*vcmpgtuh.", CODE_FOR_j_24_t_frxx_simple, B_UID(139) }; +static const struct builtin B15_vec_all_lt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:15", "*vcmpgtuh.", CODE_FOR_j_24_t_frxx_simple, B_UID(140) }; +static const struct builtin B16_vec_all_lt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:16", "*vcmpgtuw.", CODE_FOR_j_24_t_frxx_simple, B_UID(141) }; +static const struct builtin B17_vec_all_lt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:17", "*vcmpgtuw.", CODE_FOR_j_24_t_frxx_simple, B_UID(142) }; +static const struct builtin B18_vec_all_lt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:18", "*vcmpgtub.", 
CODE_FOR_j_24_t_frxx_simple, B_UID(143) }; +static const struct builtin B19_vec_all_lt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:19", "*vcmpgtub.", CODE_FOR_j_24_t_frxx_simple, B_UID(144) }; +static const struct builtin B_vec_all_nan = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc26td, 1, FALSE, FALSE, 0, "vec_all_nan", "*vcmpeqfp.", CODE_FOR_j_26_t_fx_simple, B_UID(145) }; +static const struct builtin B1_vec_all_ne = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:1", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(146) }; +static const struct builtin B2_vec_all_ne = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:2", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(147) }; +static const struct builtin B3_vec_all_ne = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:3", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(148) }; +static const struct builtin B4_vec_all_ne = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:4", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(149) }; +static const struct builtin B5_vec_all_ne = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:5", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(150) }; +static const struct builtin B6_vec_all_ne = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:6", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(151) }; +static const struct builtin B7_vec_all_ne = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:7", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(152) }; +static const struct builtin B8_vec_all_ne = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:8", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(153) }; +static const struct builtin B9_vec_all_ne = { { &T_vec_b8, 
&T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:9", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(154) }; +static const struct builtin B10_vec_all_ne = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:10", "*vcmpeqfp.", CODE_FOR_j_26_t_fxx_simple, B_UID(155) }; +static const struct builtin B11_vec_all_ne = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:11", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(156) }; +static const struct builtin B12_vec_all_ne = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:12", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(157) }; +static const struct builtin B13_vec_all_ne = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:13", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(158) }; +static const struct builtin B14_vec_all_ne = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:14", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(159) }; +static const struct builtin B15_vec_all_ne = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:15", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(160) }; +static const struct builtin B16_vec_all_ne = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:16", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(161) }; +static const struct builtin B17_vec_all_ne = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:17", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(162) }; +static const struct builtin B18_vec_all_ne = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:18", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(163) }; +static const struct builtin B19_vec_all_ne = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:19", 
"*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(164) }; +static const struct builtin B20_vec_all_ne = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:20", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(165) }; +static const struct builtin B21_vec_all_ne = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:21", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(166) }; +static const struct builtin B22_vec_all_ne = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:22", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(167) }; +static const struct builtin B23_vec_all_ne = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:23", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(168) }; +static const struct builtin B_vec_all_nge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_nge", "*vcmpgefp.", CODE_FOR_j_26_t_fxx_simple, B_UID(169) }; +static const struct builtin B_vec_all_ngt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ngt", "*vcmpgtfp.", CODE_FOR_j_26_t_fxx_simple, B_UID(170) }; +static const struct builtin B_vec_all_nle = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_nle", "*vcmpgefp.", CODE_FOR_j_26_t_frxx_simple, B_UID(171) }; +static const struct builtin B_vec_all_nlt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_nlt", "*vcmpgtfp.", CODE_FOR_j_26_t_frxx_simple, B_UID(172) }; +static const struct builtin B_vec_all_numeric = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc24td, 1, FALSE, FALSE, 0, "vec_all_numeric", "*vcmpeqfp.", CODE_FOR_j_24_t_fx_simple, B_UID(173) }; +static const struct builtin B1_vec_vand = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 2, "vec_vand:1", "*vand", CODE_FOR_xfxx_simple, B_UID(174) }; +static const struct builtin B2_vec_vand = { { 
&T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vand:2", "*vand", CODE_FOR_xfxx_simple, B_UID(175) }; +static const struct builtin B3_vec_vand = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vand:3", "*vand", CODE_FOR_xfxx_simple, B_UID(176) }; +static const struct builtin B4_vec_vand = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 2, "vec_vand:4", "*vand", CODE_FOR_xfxx_simple, B_UID(177) }; +static const struct builtin B5_vec_vand = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vand:5", "*vand", CODE_FOR_xfxx_simple, B_UID(178) }; +static const struct builtin B6_vec_vand = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vand:6", "*vand", CODE_FOR_xfxx_simple, B_UID(179) }; +static const struct builtin B7_vec_vand = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vand:7", "*vand", CODE_FOR_xfxx_simple, B_UID(180) }; +static const struct builtin B8_vec_vand = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 2, "vec_vand:8", "*vand", CODE_FOR_xfxx_simple, B_UID(181) }; +static const struct builtin B9_vec_vand = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vand:9", "*vand", CODE_FOR_xfxx_simple, B_UID(182) }; +static const struct builtin B10_vec_vand = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vand:10", "*vand", CODE_FOR_xfxx_simple, B_UID(183) }; +static const struct builtin B11_vec_vand = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vand:11", "*vand", CODE_FOR_xfxx_simple, B_UID(184) }; +static const struct builtin B12_vec_vand = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vand:12", "*vand", CODE_FOR_xfxx_simple, B_UID(185) }; +static const struct builtin B13_vec_vand = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, 
FALSE, FALSE, 2, "vec_vand:13", "*vand", CODE_FOR_xfxx_simple, B_UID(186) }; +static const struct builtin B14_vec_vand = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vand:14", "*vand", CODE_FOR_xfxx_simple, B_UID(187) }; +static const struct builtin B15_vec_vand = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vand:15", "*vand", CODE_FOR_xfxx_simple, B_UID(188) }; +static const struct builtin B16_vec_vand = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vand:16", "*vand", CODE_FOR_xfxx_simple, B_UID(189) }; +static const struct builtin B17_vec_vand = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vand:17", "*vand", CODE_FOR_xfxx_simple, B_UID(190) }; +static const struct builtin B18_vec_vand = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vand:18", "*vand", CODE_FOR_xfxx_simple, B_UID(191) }; +static const struct builtin B19_vec_vand = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vand:19", "*vand", CODE_FOR_xfxx_simple, B_UID(192) }; +static const struct builtin B20_vec_vand = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vand:20", "*vand", CODE_FOR_xfxx_simple, B_UID(193) }; +static const struct builtin B21_vec_vand = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vand:21", "*vand", CODE_FOR_xfxx_simple, B_UID(194) }; +static const struct builtin B22_vec_vand = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vand:22", "*vand", CODE_FOR_xfxx_simple, B_UID(195) }; +static const struct builtin B23_vec_vand = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vand:23", "*vand", CODE_FOR_xfxx_simple, B_UID(196) }; +static const struct builtin B24_vec_vand = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vand:24", "*vand", 
CODE_FOR_xfxx_simple, B_UID(197) }; +static const struct builtin B1_vec_vandc = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 1, "vec_vandc:1", "*vandc", CODE_FOR_xfxx_simple, B_UID(198) }; +static const struct builtin B2_vec_vandc = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vandc:2", "*vandc", CODE_FOR_xfxx_simple, B_UID(199) }; +static const struct builtin B3_vec_vandc = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vandc:3", "*vandc", CODE_FOR_xfxx_simple, B_UID(200) }; +static const struct builtin B4_vec_vandc = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 1, "vec_vandc:4", "*vandc", CODE_FOR_xfxx_simple, B_UID(201) }; +static const struct builtin B5_vec_vandc = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vandc:5", "*vandc", CODE_FOR_xfxx_simple, B_UID(202) }; +static const struct builtin B6_vec_vandc = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vandc:6", "*vandc", CODE_FOR_xfxx_simple, B_UID(203) }; +static const struct builtin B7_vec_vandc = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vandc:7", "*vandc", CODE_FOR_xfxx_simple, B_UID(204) }; +static const struct builtin B8_vec_vandc = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 1, "vec_vandc:8", "*vandc", CODE_FOR_xfxx_simple, B_UID(205) }; +static const struct builtin B9_vec_vandc = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vandc:9", "*vandc", CODE_FOR_xfxx_simple, B_UID(206) }; +static const struct builtin B10_vec_vandc = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vandc:10", "*vandc", CODE_FOR_xfxx_simple, B_UID(207) }; +static const struct builtin B11_vec_vandc = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vandc:11", "*vandc", CODE_FOR_xfxx_simple, 
B_UID(208) }; +static const struct builtin B12_vec_vandc = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vandc:12", "*vandc", CODE_FOR_xfxx_simple, B_UID(209) }; +static const struct builtin B13_vec_vandc = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vandc:13", "*vandc", CODE_FOR_xfxx_simple, B_UID(210) }; +static const struct builtin B14_vec_vandc = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vandc:14", "*vandc", CODE_FOR_xfxx_simple, B_UID(211) }; +static const struct builtin B15_vec_vandc = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vandc:15", "*vandc", CODE_FOR_xfxx_simple, B_UID(212) }; +static const struct builtin B16_vec_vandc = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vandc:16", "*vandc", CODE_FOR_xfxx_simple, B_UID(213) }; +static const struct builtin B17_vec_vandc = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vandc:17", "*vandc", CODE_FOR_xfxx_simple, B_UID(214) }; +static const struct builtin B18_vec_vandc = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vandc:18", "*vandc", CODE_FOR_xfxx_simple, B_UID(215) }; +static const struct builtin B19_vec_vandc = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vandc:19", "*vandc", CODE_FOR_xfxx_simple, B_UID(216) }; +static const struct builtin B20_vec_vandc = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vandc:20", "*vandc", CODE_FOR_xfxx_simple, B_UID(217) }; +static const struct builtin B21_vec_vandc = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vandc:21", "*vandc", CODE_FOR_xfxx_simple, B_UID(218) }; +static const struct builtin B22_vec_vandc = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vandc:22", "*vandc", CODE_FOR_xfxx_simple, 
B_UID(219) }; +static const struct builtin B23_vec_vandc = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vandc:23", "*vandc", CODE_FOR_xfxx_simple, B_UID(220) }; +static const struct builtin B24_vec_vandc = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vandc:24", "*vandc", CODE_FOR_xfxx_simple, B_UID(221) }; +static const struct builtin B1_vec_any_eq = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:1", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(222) }; +static const struct builtin B2_vec_any_eq = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:2", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(223) }; +static const struct builtin B3_vec_any_eq = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:3", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(224) }; +static const struct builtin B4_vec_any_eq = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:4", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(225) }; +static const struct builtin B5_vec_any_eq = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:5", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(226) }; +static const struct builtin B6_vec_any_eq = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:6", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(227) }; +static const struct builtin B7_vec_any_eq = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:7", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(228) }; +static const struct builtin B8_vec_any_eq = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:8", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(229) }; +static const struct builtin B9_vec_any_eq = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, 
FALSE, 0, "vec_any_eq:9", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(230) }; +static const struct builtin B10_vec_any_eq = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:10", "*vcmpeqfp.", CODE_FOR_j_26_f_fxx_simple, B_UID(231) }; +static const struct builtin B11_vec_any_eq = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:11", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(232) }; +static const struct builtin B12_vec_any_eq = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:12", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(233) }; +static const struct builtin B13_vec_any_eq = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:13", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(234) }; +static const struct builtin B14_vec_any_eq = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:14", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(235) }; +static const struct builtin B15_vec_any_eq = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:15", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(236) }; +static const struct builtin B16_vec_any_eq = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:16", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(237) }; +static const struct builtin B17_vec_any_eq = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:17", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(238) }; +static const struct builtin B18_vec_any_eq = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:18", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(239) }; +static const struct builtin B19_vec_any_eq = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:19", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(240) }; 
+static const struct builtin B20_vec_any_eq = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:20", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(241) }; +static const struct builtin B21_vec_any_eq = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:21", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(242) }; +static const struct builtin B22_vec_any_eq = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:22", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(243) }; +static const struct builtin B23_vec_any_eq = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:23", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(244) }; +static const struct builtin B1_vec_any_ge = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:1", "*vcmpgtsh.", CODE_FOR_j_24_f_frxx_simple, B_UID(245) }; +static const struct builtin B2_vec_any_ge = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:2", "*vcmpgtuh.", CODE_FOR_j_24_f_frxx_simple, B_UID(246) }; +static const struct builtin B3_vec_any_ge = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:3", "*vcmpgtsw.", CODE_FOR_j_24_f_frxx_simple, B_UID(247) }; +static const struct builtin B4_vec_any_ge = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:4", "*vcmpgtuw.", CODE_FOR_j_24_f_frxx_simple, B_UID(248) }; +static const struct builtin B5_vec_any_ge = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:5", "*vcmpgtsb.", CODE_FOR_j_24_f_frxx_simple, B_UID(249) }; +static const struct builtin B6_vec_any_ge = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:6", "*vcmpgtub.", CODE_FOR_j_24_f_frxx_simple, B_UID(250) }; +static const struct builtin B7_vec_any_ge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", 
&T_cc26f, 2, FALSE, FALSE, 0, "vec_any_ge:7", "*vcmpgefp.", CODE_FOR_j_26_f_fxx_simple, B_UID(251) }; +static const struct builtin B8_vec_any_ge = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:8", "*vcmpgtsh.", CODE_FOR_j_24_f_frxx_simple, B_UID(252) }; +static const struct builtin B9_vec_any_ge = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:9", "*vcmpgtsh.", CODE_FOR_j_24_f_frxx_simple, B_UID(253) }; +static const struct builtin B10_vec_any_ge = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:10", "*vcmpgtsw.", CODE_FOR_j_24_f_frxx_simple, B_UID(254) }; +static const struct builtin B11_vec_any_ge = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:11", "*vcmpgtsw.", CODE_FOR_j_24_f_frxx_simple, B_UID(255) }; +static const struct builtin B12_vec_any_ge = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:12", "*vcmpgtsb.", CODE_FOR_j_24_f_frxx_simple, B_UID(256) }; +static const struct builtin B13_vec_any_ge = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:13", "*vcmpgtsb.", CODE_FOR_j_24_f_frxx_simple, B_UID(257) }; +static const struct builtin B14_vec_any_ge = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:14", "*vcmpgtuh.", CODE_FOR_j_24_f_frxx_simple, B_UID(258) }; +static const struct builtin B15_vec_any_ge = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:15", "*vcmpgtuh.", CODE_FOR_j_24_f_frxx_simple, B_UID(259) }; +static const struct builtin B16_vec_any_ge = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:16", "*vcmpgtuw.", CODE_FOR_j_24_f_frxx_simple, B_UID(260) }; +static const struct builtin B17_vec_any_ge = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:17", "*vcmpgtuw.", 
CODE_FOR_j_24_f_frxx_simple, B_UID(261) }; +static const struct builtin B18_vec_any_ge = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:18", "*vcmpgtub.", CODE_FOR_j_24_f_frxx_simple, B_UID(262) }; +static const struct builtin B19_vec_any_ge = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:19", "*vcmpgtub.", CODE_FOR_j_24_f_frxx_simple, B_UID(263) }; +static const struct builtin B1_vec_any_gt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:1", "*vcmpgtsh.", CODE_FOR_j_26_f_fxx_simple, B_UID(264) }; +static const struct builtin B2_vec_any_gt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:2", "*vcmpgtuh.", CODE_FOR_j_26_f_fxx_simple, B_UID(265) }; +static const struct builtin B3_vec_any_gt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:3", "*vcmpgtsw.", CODE_FOR_j_26_f_fxx_simple, B_UID(266) }; +static const struct builtin B4_vec_any_gt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:4", "*vcmpgtuw.", CODE_FOR_j_26_f_fxx_simple, B_UID(267) }; +static const struct builtin B5_vec_any_gt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:5", "*vcmpgtsb.", CODE_FOR_j_26_f_fxx_simple, B_UID(268) }; +static const struct builtin B6_vec_any_gt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:6", "*vcmpgtub.", CODE_FOR_j_26_f_fxx_simple, B_UID(269) }; +static const struct builtin B7_vec_any_gt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:7", "*vcmpgtfp.", CODE_FOR_j_26_f_fxx_simple, B_UID(270) }; +static const struct builtin B8_vec_any_gt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:8", "*vcmpgtsh.", CODE_FOR_j_26_f_fxx_simple, B_UID(271) }; +static const struct builtin B9_vec_any_gt = { { 
&T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:9", "*vcmpgtsh.", CODE_FOR_j_26_f_fxx_simple, B_UID(272) }; +static const struct builtin B10_vec_any_gt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:10", "*vcmpgtsw.", CODE_FOR_j_26_f_fxx_simple, B_UID(273) }; +static const struct builtin B11_vec_any_gt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:11", "*vcmpgtsw.", CODE_FOR_j_26_f_fxx_simple, B_UID(274) }; +static const struct builtin B12_vec_any_gt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:12", "*vcmpgtsb.", CODE_FOR_j_26_f_fxx_simple, B_UID(275) }; +static const struct builtin B13_vec_any_gt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:13", "*vcmpgtsb.", CODE_FOR_j_26_f_fxx_simple, B_UID(276) }; +static const struct builtin B14_vec_any_gt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:14", "*vcmpgtuh.", CODE_FOR_j_26_f_fxx_simple, B_UID(277) }; +static const struct builtin B15_vec_any_gt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:15", "*vcmpgtuh.", CODE_FOR_j_26_f_fxx_simple, B_UID(278) }; +static const struct builtin B16_vec_any_gt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:16", "*vcmpgtuw.", CODE_FOR_j_26_f_fxx_simple, B_UID(279) }; +static const struct builtin B17_vec_any_gt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:17", "*vcmpgtuw.", CODE_FOR_j_26_f_fxx_simple, B_UID(280) }; +static const struct builtin B18_vec_any_gt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:18", "*vcmpgtub.", CODE_FOR_j_26_f_fxx_simple, B_UID(281) }; +static const struct builtin B19_vec_any_gt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:19", 
"*vcmpgtub.", CODE_FOR_j_26_f_fxx_simple, B_UID(282) }; +static const struct builtin B1_vec_any_le = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:1", "*vcmpgtsh.", CODE_FOR_j_24_f_fxx_simple, B_UID(283) }; +static const struct builtin B2_vec_any_le = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:2", "*vcmpgtuh.", CODE_FOR_j_24_f_fxx_simple, B_UID(284) }; +static const struct builtin B3_vec_any_le = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:3", "*vcmpgtsw.", CODE_FOR_j_24_f_fxx_simple, B_UID(285) }; +static const struct builtin B4_vec_any_le = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:4", "*vcmpgtuw.", CODE_FOR_j_24_f_fxx_simple, B_UID(286) }; +static const struct builtin B5_vec_any_le = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:5", "*vcmpgtsb.", CODE_FOR_j_24_f_fxx_simple, B_UID(287) }; +static const struct builtin B6_vec_any_le = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:6", "*vcmpgtub.", CODE_FOR_j_24_f_fxx_simple, B_UID(288) }; +static const struct builtin B7_vec_any_le = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_le:7", "*vcmpgefp.", CODE_FOR_j_26_f_frxx_simple, B_UID(289) }; +static const struct builtin B8_vec_any_le = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:8", "*vcmpgtsh.", CODE_FOR_j_24_f_fxx_simple, B_UID(290) }; +static const struct builtin B9_vec_any_le = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:9", "*vcmpgtsh.", CODE_FOR_j_24_f_fxx_simple, B_UID(291) }; +static const struct builtin B10_vec_any_le = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:10", "*vcmpgtsw.", CODE_FOR_j_24_f_fxx_simple, B_UID(292) }; +static const struct builtin B11_vec_any_le 
= { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:11", "*vcmpgtsw.", CODE_FOR_j_24_f_fxx_simple, B_UID(293) }; +static const struct builtin B12_vec_any_le = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:12", "*vcmpgtsb.", CODE_FOR_j_24_f_fxx_simple, B_UID(294) }; +static const struct builtin B13_vec_any_le = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:13", "*vcmpgtsb.", CODE_FOR_j_24_f_fxx_simple, B_UID(295) }; +static const struct builtin B14_vec_any_le = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:14", "*vcmpgtuh.", CODE_FOR_j_24_f_fxx_simple, B_UID(296) }; +static const struct builtin B15_vec_any_le = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:15", "*vcmpgtuh.", CODE_FOR_j_24_f_fxx_simple, B_UID(297) }; +static const struct builtin B16_vec_any_le = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:16", "*vcmpgtuw.", CODE_FOR_j_24_f_fxx_simple, B_UID(298) }; +static const struct builtin B17_vec_any_le = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:17", "*vcmpgtuw.", CODE_FOR_j_24_f_fxx_simple, B_UID(299) }; +static const struct builtin B18_vec_any_le = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:18", "*vcmpgtub.", CODE_FOR_j_24_f_fxx_simple, B_UID(300) }; +static const struct builtin B19_vec_any_le = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:19", "*vcmpgtub.", CODE_FOR_j_24_f_fxx_simple, B_UID(301) }; +static const struct builtin B1_vec_any_lt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:1", "*vcmpgtsh.", CODE_FOR_j_26_f_frxx_simple, B_UID(302) }; +static const struct builtin B2_vec_any_lt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, 
"vec_any_lt:2", "*vcmpgtuh.", CODE_FOR_j_26_f_frxx_simple, B_UID(303) }; +static const struct builtin B3_vec_any_lt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:3", "*vcmpgtsw.", CODE_FOR_j_26_f_frxx_simple, B_UID(304) }; +static const struct builtin B4_vec_any_lt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:4", "*vcmpgtuw.", CODE_FOR_j_26_f_frxx_simple, B_UID(305) }; +static const struct builtin B5_vec_any_lt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:5", "*vcmpgtsb.", CODE_FOR_j_26_f_frxx_simple, B_UID(306) }; +static const struct builtin B6_vec_any_lt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:6", "*vcmpgtub.", CODE_FOR_j_26_f_frxx_simple, B_UID(307) }; +static const struct builtin B7_vec_any_lt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:7", "*vcmpgtfp.", CODE_FOR_j_26_f_frxx_simple, B_UID(308) }; +static const struct builtin B8_vec_any_lt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:8", "*vcmpgtsh.", CODE_FOR_j_26_f_frxx_simple, B_UID(309) }; +static const struct builtin B9_vec_any_lt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:9", "*vcmpgtsh.", CODE_FOR_j_26_f_frxx_simple, B_UID(310) }; +static const struct builtin B10_vec_any_lt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:10", "*vcmpgtsw.", CODE_FOR_j_26_f_frxx_simple, B_UID(311) }; +static const struct builtin B11_vec_any_lt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:11", "*vcmpgtsw.", CODE_FOR_j_26_f_frxx_simple, B_UID(312) }; +static const struct builtin B12_vec_any_lt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:12", "*vcmpgtsb.", CODE_FOR_j_26_f_frxx_simple, B_UID(313) }; 
+static const struct builtin B13_vec_any_lt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:13", "*vcmpgtsb.", CODE_FOR_j_26_f_frxx_simple, B_UID(314) }; +static const struct builtin B14_vec_any_lt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:14", "*vcmpgtuh.", CODE_FOR_j_26_f_frxx_simple, B_UID(315) }; +static const struct builtin B15_vec_any_lt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:15", "*vcmpgtuh.", CODE_FOR_j_26_f_frxx_simple, B_UID(316) }; +static const struct builtin B16_vec_any_lt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:16", "*vcmpgtuw.", CODE_FOR_j_26_f_frxx_simple, B_UID(317) }; +static const struct builtin B17_vec_any_lt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:17", "*vcmpgtuw.", CODE_FOR_j_26_f_frxx_simple, B_UID(318) }; +static const struct builtin B18_vec_any_lt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:18", "*vcmpgtub.", CODE_FOR_j_26_f_frxx_simple, B_UID(319) }; +static const struct builtin B19_vec_any_lt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:19", "*vcmpgtub.", CODE_FOR_j_26_f_frxx_simple, B_UID(320) }; +static const struct builtin B_vec_any_nan = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc24fd, 1, FALSE, FALSE, 0, "vec_any_nan", "*vcmpeqfp.", CODE_FOR_j_24_f_fx_simple, B_UID(321) }; +static const struct builtin B1_vec_any_ne = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:1", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(322) }; +static const struct builtin B2_vec_any_ne = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:2", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(323) }; +static const struct builtin B3_vec_any_ne = { { &T_vec_b16, &T_vec_u16, NULL, }, 
"xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:3", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(324) }; +static const struct builtin B4_vec_any_ne = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:4", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(325) }; +static const struct builtin B5_vec_any_ne = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:5", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(326) }; +static const struct builtin B6_vec_any_ne = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:6", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(327) }; +static const struct builtin B7_vec_any_ne = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:7", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(328) }; +static const struct builtin B8_vec_any_ne = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:8", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(329) }; +static const struct builtin B9_vec_any_ne = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:9", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(330) }; +static const struct builtin B10_vec_any_ne = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:10", "*vcmpeqfp.", CODE_FOR_j_24_f_fxx_simple, B_UID(331) }; +static const struct builtin B11_vec_any_ne = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:11", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(332) }; +static const struct builtin B12_vec_any_ne = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:12", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(333) }; +static const struct builtin B13_vec_any_ne = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:13", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, 
B_UID(334) }; +static const struct builtin B14_vec_any_ne = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:14", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(335) }; +static const struct builtin B15_vec_any_ne = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:15", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(336) }; +static const struct builtin B16_vec_any_ne = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:16", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(337) }; +static const struct builtin B17_vec_any_ne = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:17", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(338) }; +static const struct builtin B18_vec_any_ne = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:18", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(339) }; +static const struct builtin B19_vec_any_ne = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:19", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(340) }; +static const struct builtin B20_vec_any_ne = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:20", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(341) }; +static const struct builtin B21_vec_any_ne = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:21", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(342) }; +static const struct builtin B22_vec_any_ne = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:22", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(343) }; +static const struct builtin B23_vec_any_ne = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:23", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(344) }; +static const struct builtin B_vec_any_nge = { { &T_vec_f32, &T_vec_f32, 
NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_nge", "*vcmpgefp.", CODE_FOR_j_24_f_fxx_simple, B_UID(345) }; +static const struct builtin B_vec_any_ngt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ngt", "*vcmpgtfp.", CODE_FOR_j_24_f_fxx_simple, B_UID(346) }; +static const struct builtin B_vec_any_nle = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_nle", "*vcmpgefp.", CODE_FOR_j_24_f_frxx_simple, B_UID(347) }; +static const struct builtin B_vec_any_nlt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_nlt", "*vcmpgtfp.", CODE_FOR_j_24_f_frxx_simple, B_UID(348) }; +static const struct builtin B_vec_any_numeric = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc26fd, 1, FALSE, FALSE, 0, "vec_any_numeric", "*vcmpeqfp.", CODE_FOR_j_26_f_fx_simple, B_UID(349) }; +static const struct builtin B_vec_any_out = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_out", "*vcmpbfp.", CODE_FOR_j_26_f_fxx_simple, B_UID(350) }; +static const struct builtin B_vec_vavgsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vavgsh", "*vavgsh", CODE_FOR_xfxx_simple, B_UID(351) }; +static const struct builtin B_vec_vavgsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vavgsw", "*vavgsw", CODE_FOR_xfxx_simple, B_UID(352) }; +static const struct builtin B_vec_vavgsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vavgsb", "*vavgsb", CODE_FOR_xfxx_simple, B_UID(353) }; +static const struct builtin B_vec_vavguh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vavguh", "*vavguh", CODE_FOR_xfxx_simple, B_UID(354) }; +static const struct builtin B_vec_vavguw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vavguw", "*vavguw", CODE_FOR_xfxx_simple, B_UID(355) }; +static const struct builtin 
B_vec_vavgub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vavgub", "*vavgub", CODE_FOR_xfxx_simple, B_UID(356) }; +static const struct builtin B_vec_vrfip = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfip", "*vrfip", CODE_FOR_xfx_fp, B_UID(357) }; +static const struct builtin B_vec_vcmpbfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vcmpbfp", "*vcmpbfp", CODE_FOR_xfxx_simple, B_UID(358) }; +static const struct builtin B_vec_vcmpeqfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 7, "vec_vcmpeqfp", "*vcmpeqfp", CODE_FOR_xfxx_simple, B_UID(359) }; +static const struct builtin B1_vec_vcmpequh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 7, "vec_vcmpequh:1", "*vcmpequh", CODE_FOR_xfxx_simple, B_UID(360) }; +static const struct builtin B1_vec_vcmpequw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 7, "vec_vcmpequw:1", "*vcmpequw", CODE_FOR_xfxx_simple, B_UID(361) }; +static const struct builtin B1_vec_vcmpequb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 7, "vec_vcmpequb:1", "*vcmpequb", CODE_FOR_xfxx_simple, B_UID(362) }; +static const struct builtin B2_vec_vcmpequh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 7, "vec_vcmpequh:2", "*vcmpequh", CODE_FOR_xfxx_simple, B_UID(363) }; +static const struct builtin B2_vec_vcmpequw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 7, "vec_vcmpequw:2", "*vcmpequw", CODE_FOR_xfxx_simple, B_UID(364) }; +static const struct builtin B2_vec_vcmpequb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 7, "vec_vcmpequb:2", "*vcmpequb", CODE_FOR_xfxx_simple, B_UID(365) }; +static const struct builtin B_vec_vcmpgefp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgefp", "*vcmpgefp", CODE_FOR_xfxx_simple, B_UID(366) }; 
+static const struct builtin B_vec_vcmpgtfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgtfp", "*vcmpgtfp", CODE_FOR_xfxx_simple, B_UID(367) }; +static const struct builtin B_vec_vcmpgtsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vcmpgtsh", "*vcmpgtsh", CODE_FOR_xfxx_simple, B_UID(368) }; +static const struct builtin B_vec_vcmpgtsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgtsw", "*vcmpgtsw", CODE_FOR_xfxx_simple, B_UID(369) }; +static const struct builtin B_vec_vcmpgtsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vcmpgtsb", "*vcmpgtsb", CODE_FOR_xfxx_simple, B_UID(370) }; +static const struct builtin B_vec_vcmpgtuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vcmpgtuh", "*vcmpgtuh", CODE_FOR_xfxx_simple, B_UID(371) }; +static const struct builtin B_vec_vcmpgtuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgtuw", "*vcmpgtuw", CODE_FOR_xfxx_simple, B_UID(372) }; +static const struct builtin B_vec_vcmpgtub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vcmpgtub", "*vcmpgtub", CODE_FOR_xfxx_simple, B_UID(373) }; +static const struct builtin B_vec_cmple = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmple", "*vcmpgefp", CODE_FOR_xfxx_simple, B_UID(374) }; +static const struct builtin B1_vec_cmplt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmplt:1", "*vcmpgtfp", CODE_FOR_xfxx_simple, B_UID(375) }; +static const struct builtin B2_vec_cmplt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 10, "vec_cmplt:2", "*vcmpgtsh", CODE_FOR_xfxx_simple, B_UID(376) }; +static const struct builtin B3_vec_cmplt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmplt:3", "*vcmpgtsw", 
CODE_FOR_xfxx_simple, B_UID(377) }; +static const struct builtin B4_vec_cmplt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 10, "vec_cmplt:4", "*vcmpgtsb", CODE_FOR_xfxx_simple, B_UID(378) }; +static const struct builtin B5_vec_cmplt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 10, "vec_cmplt:5", "*vcmpgtuh", CODE_FOR_xfxx_simple, B_UID(379) }; +static const struct builtin B6_vec_cmplt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmplt:6", "*vcmpgtuw", CODE_FOR_xfxx_simple, B_UID(380) }; +static const struct builtin B7_vec_cmplt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 10, "vec_cmplt:7", "*vcmpgtub", CODE_FOR_xfxx_simple, B_UID(381) }; +static const struct builtin B_vec_vcfsx = { { &T_vec_s32, &T_immed_u5, NULL, }, "xB", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vcfsx", "*vcfsx", CODE_FOR_xfxB_fp, B_UID(382) }; +static const struct builtin B_vec_vcfux = { { &T_vec_u32, &T_immed_u5, NULL, }, "xB", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vcfux", "*vcfux", CODE_FOR_xfxB_fp, B_UID(383) }; +static const struct builtin B_vec_vctsxs = { { &T_vec_f32, &T_immed_u5, NULL, }, "xB", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vctsxs", "*vctsxs", CODE_FOR_xfxB_fp, B_UID(384) }; +static const struct builtin B_vec_vctuxs = { { &T_vec_f32, &T_immed_u5, NULL, }, "xB", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vctuxs", "*vctuxs", CODE_FOR_xfxB_fp, B_UID(385) }; +static const struct builtin B_vec_dss = { { &T_immed_u2, NULL, NULL, }, "D", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_dss", "*dss", CODE_FOR_vlfD_load, B_UID(386) }; +static const struct builtin B_vec_dssall = { { NULL, NULL, NULL, }, "", &T_volatile_void, 0, FALSE, FALSE, 0, "vec_dssall", "*dssall", CODE_FOR_vlf_load, B_UID(387) }; +static const struct builtin B1_vec_dst = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:1", "*dst", CODE_FOR_vlfiiD_load, B_UID(388) }; 
+static const struct builtin B2_vec_dst = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:2", "*dst", CODE_FOR_vlfiiD_load, B_UID(389) }; +static const struct builtin B3_vec_dst = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:3", "*dst", CODE_FOR_vlfiiD_load, B_UID(390) }; +static const struct builtin B4_vec_dst = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:4", "*dst", CODE_FOR_vlfiiD_load, B_UID(391) }; +static const struct builtin B5_vec_dst = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:5", "*dst", CODE_FOR_vlfiiD_load, B_UID(392) }; +static const struct builtin B6_vec_dst = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:6", "*dst", CODE_FOR_vlfiiD_load, B_UID(393) }; +static const struct builtin B7_vec_dst = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:7", "*dst", CODE_FOR_vlfiiD_load, B_UID(394) }; +static const struct builtin B8_vec_dst = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:8", "*dst", CODE_FOR_vlfiiD_load, B_UID(395) }; +static const struct builtin B9_vec_dst = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:9", "*dst", CODE_FOR_vlfiiD_load, B_UID(396) }; +static const struct builtin B10_vec_dst = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:10", "*dst", CODE_FOR_vlfiiD_load, B_UID(397) }; +static const struct builtin B11_vec_dst = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:11", "*dst", CODE_FOR_vlfiiD_load, B_UID(398) }; +static const struct builtin B12_vec_dst = { { 
&T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:12", "*dst", CODE_FOR_vlfiiD_load, B_UID(399) }; +static const struct builtin B13_vec_dst = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:13", "*dst", CODE_FOR_vlfiiD_load, B_UID(400) }; +static const struct builtin B14_vec_dst = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:14", "*dst", CODE_FOR_vlfiiD_load, B_UID(401) }; +static const struct builtin B15_vec_dst = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:15", "*dst", CODE_FOR_vlfiiD_load, B_UID(402) }; +static const struct builtin B16_vec_dst = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:16", "*dst", CODE_FOR_vlfiiD_load, B_UID(403) }; +static const struct builtin B17_vec_dst = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:17", "*dst", CODE_FOR_vlfiiD_load, B_UID(404) }; +static const struct builtin B18_vec_dst = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:18", "*dst", CODE_FOR_vlfiiD_load, B_UID(405) }; +static const struct builtin B19_vec_dst = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:19", "*dst", CODE_FOR_vlfiiD_load, B_UID(406) }; +static const struct builtin B20_vec_dst = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:20", "*dst", CODE_FOR_vlfiiD_load, B_UID(407) }; +static const struct builtin B1_vec_dstst = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:1", "*dstst", CODE_FOR_vlfiiD_load, B_UID(408) }; +static const struct builtin B2_vec_dstst = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", 
&T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:2", "*dstst", CODE_FOR_vlfiiD_load, B_UID(409) }; +static const struct builtin B3_vec_dstst = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:3", "*dstst", CODE_FOR_vlfiiD_load, B_UID(410) }; +static const struct builtin B4_vec_dstst = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:4", "*dstst", CODE_FOR_vlfiiD_load, B_UID(411) }; +static const struct builtin B5_vec_dstst = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:5", "*dstst", CODE_FOR_vlfiiD_load, B_UID(412) }; +static const struct builtin B6_vec_dstst = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:6", "*dstst", CODE_FOR_vlfiiD_load, B_UID(413) }; +static const struct builtin B7_vec_dstst = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:7", "*dstst", CODE_FOR_vlfiiD_load, B_UID(414) }; +static const struct builtin B8_vec_dstst = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:8", "*dstst", CODE_FOR_vlfiiD_load, B_UID(415) }; +static const struct builtin B9_vec_dstst = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:9", "*dstst", CODE_FOR_vlfiiD_load, B_UID(416) }; +static const struct builtin B10_vec_dstst = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:10", "*dstst", CODE_FOR_vlfiiD_load, B_UID(417) }; +static const struct builtin B11_vec_dstst = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:11", "*dstst", CODE_FOR_vlfiiD_load, B_UID(418) }; +static const struct builtin B12_vec_dstst = { { &T_const_vec_b8_ptr, &T_int, 
&T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:12", "*dstst", CODE_FOR_vlfiiD_load, B_UID(419) }; +static const struct builtin B13_vec_dstst = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:13", "*dstst", CODE_FOR_vlfiiD_load, B_UID(420) }; +static const struct builtin B14_vec_dstst = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:14", "*dstst", CODE_FOR_vlfiiD_load, B_UID(421) }; +static const struct builtin B15_vec_dstst = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:15", "*dstst", CODE_FOR_vlfiiD_load, B_UID(422) }; +static const struct builtin B16_vec_dstst = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:16", "*dstst", CODE_FOR_vlfiiD_load, B_UID(423) }; +static const struct builtin B17_vec_dstst = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:17", "*dstst", CODE_FOR_vlfiiD_load, B_UID(424) }; +static const struct builtin B18_vec_dstst = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:18", "*dstst", CODE_FOR_vlfiiD_load, B_UID(425) }; +static const struct builtin B19_vec_dstst = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:19", "*dstst", CODE_FOR_vlfiiD_load, B_UID(426) }; +static const struct builtin B20_vec_dstst = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:20", "*dstst", CODE_FOR_vlfiiD_load, B_UID(427) }; +static const struct builtin B1_vec_dststt = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:1", "*dststt", CODE_FOR_vlfiiD_load, B_UID(428) }; +static const struct builtin B2_vec_dststt = { { &T_const_int_ptr, 
&T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:2", "*dststt", CODE_FOR_vlfiiD_load, B_UID(429) }; +static const struct builtin B3_vec_dststt = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:3", "*dststt", CODE_FOR_vlfiiD_load, B_UID(430) }; +static const struct builtin B4_vec_dststt = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:4", "*dststt", CODE_FOR_vlfiiD_load, B_UID(431) }; +static const struct builtin B5_vec_dststt = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:5", "*dststt", CODE_FOR_vlfiiD_load, B_UID(432) }; +static const struct builtin B6_vec_dststt = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:6", "*dststt", CODE_FOR_vlfiiD_load, B_UID(433) }; +static const struct builtin B7_vec_dststt = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:7", "*dststt", CODE_FOR_vlfiiD_load, B_UID(434) }; +static const struct builtin B8_vec_dststt = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:8", "*dststt", CODE_FOR_vlfiiD_load, B_UID(435) }; +static const struct builtin B9_vec_dststt = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:9", "*dststt", CODE_FOR_vlfiiD_load, B_UID(436) }; +static const struct builtin B10_vec_dststt = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:10", "*dststt", CODE_FOR_vlfiiD_load, B_UID(437) }; +static const struct builtin B11_vec_dststt = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:11", "*dststt", CODE_FOR_vlfiiD_load, B_UID(438) }; +static const struct 
builtin B12_vec_dststt = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:12", "*dststt", CODE_FOR_vlfiiD_load, B_UID(439) }; +static const struct builtin B13_vec_dststt = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:13", "*dststt", CODE_FOR_vlfiiD_load, B_UID(440) }; +static const struct builtin B14_vec_dststt = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:14", "*dststt", CODE_FOR_vlfiiD_load, B_UID(441) }; +static const struct builtin B15_vec_dststt = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:15", "*dststt", CODE_FOR_vlfiiD_load, B_UID(442) }; +static const struct builtin B16_vec_dststt = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:16", "*dststt", CODE_FOR_vlfiiD_load, B_UID(443) }; +static const struct builtin B17_vec_dststt = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:17", "*dststt", CODE_FOR_vlfiiD_load, B_UID(444) }; +static const struct builtin B18_vec_dststt = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:18", "*dststt", CODE_FOR_vlfiiD_load, B_UID(445) }; +static const struct builtin B19_vec_dststt = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:19", "*dststt", CODE_FOR_vlfiiD_load, B_UID(446) }; +static const struct builtin B20_vec_dststt = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:20", "*dststt", CODE_FOR_vlfiiD_load, B_UID(447) }; +static const struct builtin B1_vec_dstt = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:1", "*dstt", CODE_FOR_vlfiiD_load, 
B_UID(448) }; +static const struct builtin B2_vec_dstt = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:2", "*dstt", CODE_FOR_vlfiiD_load, B_UID(449) }; +static const struct builtin B3_vec_dstt = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:3", "*dstt", CODE_FOR_vlfiiD_load, B_UID(450) }; +static const struct builtin B4_vec_dstt = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:4", "*dstt", CODE_FOR_vlfiiD_load, B_UID(451) }; +static const struct builtin B5_vec_dstt = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:5", "*dstt", CODE_FOR_vlfiiD_load, B_UID(452) }; +static const struct builtin B6_vec_dstt = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:6", "*dstt", CODE_FOR_vlfiiD_load, B_UID(453) }; +static const struct builtin B7_vec_dstt = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:7", "*dstt", CODE_FOR_vlfiiD_load, B_UID(454) }; +static const struct builtin B8_vec_dstt = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:8", "*dstt", CODE_FOR_vlfiiD_load, B_UID(455) }; +static const struct builtin B9_vec_dstt = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:9", "*dstt", CODE_FOR_vlfiiD_load, B_UID(456) }; +static const struct builtin B10_vec_dstt = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:10", "*dstt", CODE_FOR_vlfiiD_load, B_UID(457) }; +static const struct builtin B11_vec_dstt = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:11", "*dstt", CODE_FOR_vlfiiD_load, B_UID(458) }; 
+static const struct builtin B12_vec_dstt = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:12", "*dstt", CODE_FOR_vlfiiD_load, B_UID(459) }; +static const struct builtin B13_vec_dstt = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:13", "*dstt", CODE_FOR_vlfiiD_load, B_UID(460) }; +static const struct builtin B14_vec_dstt = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:14", "*dstt", CODE_FOR_vlfiiD_load, B_UID(461) }; +static const struct builtin B15_vec_dstt = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:15", "*dstt", CODE_FOR_vlfiiD_load, B_UID(462) }; +static const struct builtin B16_vec_dstt = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:16", "*dstt", CODE_FOR_vlfiiD_load, B_UID(463) }; +static const struct builtin B17_vec_dstt = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:17", "*dstt", CODE_FOR_vlfiiD_load, B_UID(464) }; +static const struct builtin B18_vec_dstt = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:18", "*dstt", CODE_FOR_vlfiiD_load, B_UID(465) }; +static const struct builtin B19_vec_dstt = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:19", "*dstt", CODE_FOR_vlfiiD_load, B_UID(466) }; +static const struct builtin B20_vec_dstt = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:20", "*dstt", CODE_FOR_vlfiiD_load, B_UID(467) }; +static const struct builtin B_vec_vexptefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vexptefp", "*vexptefp", CODE_FOR_xfx_fp, B_UID(468) }; +static const struct builtin B_vec_vrfim = { { 
&T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfim", "*vrfim", CODE_FOR_xfx_fp, B_UID(469) }; +static const struct builtin B1_vec_lvx = { { &T_int, &T_const_float_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvx:1", "*lvx", CODE_FOR_xlfii_load, B_UID(470) }; +static const struct builtin B2_vec_lvx = { { &T_int, &T_const_int_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvx:2", "*lvx", CODE_FOR_xlfii_load, B_UID(471) }; +static const struct builtin B3_vec_lvx = { { &T_int, &T_const_long_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvx:3", "*lvx", CODE_FOR_xlfii_load, B_UID(472) }; +static const struct builtin B4_vec_lvx = { { &T_int, &T_const_short_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvx:4", "*lvx", CODE_FOR_xlfii_load, B_UID(473) }; +static const struct builtin B5_vec_lvx = { { &T_int, &T_const_signed_char_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvx:5", "*lvx", CODE_FOR_xlfii_load, B_UID(474) }; +static const struct builtin B6_vec_lvx = { { &T_int, &T_const_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvx:6", "*lvx", CODE_FOR_xlfii_load, B_UID(475) }; +static const struct builtin B7_vec_lvx = { { &T_int, &T_const_unsigned_int_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvx:7", "*lvx", CODE_FOR_xlfii_load, B_UID(476) }; +static const struct builtin B8_vec_lvx = { { &T_int, &T_const_unsigned_long_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvx:8", "*lvx", CODE_FOR_xlfii_load, B_UID(477) }; +static const struct builtin B9_vec_lvx = { { &T_int, &T_const_unsigned_short_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvx:9", "*lvx", CODE_FOR_xlfii_load, B_UID(478) }; +static const struct builtin B10_vec_lvx = { { &T_int, &T_const_vec_b16_ptr, NULL, }, "ii", &T_vec_b16, 2, TRUE, FALSE, 0, "vec_lvx:10", "*lvx", CODE_FOR_xlfii_load, B_UID(479) }; +static const struct builtin B11_vec_lvx = { { &T_int, &T_const_vec_b32_ptr, NULL, 
}, "ii", &T_vec_b32, 2, TRUE, FALSE, 0, "vec_lvx:11", "*lvx", CODE_FOR_xlfii_load, B_UID(480) }; +static const struct builtin B12_vec_lvx = { { &T_int, &T_const_vec_b8_ptr, NULL, }, "ii", &T_vec_b8, 2, TRUE, FALSE, 0, "vec_lvx:12", "*lvx", CODE_FOR_xlfii_load, B_UID(481) }; +static const struct builtin B13_vec_lvx = { { &T_int, &T_const_vec_f32_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvx:13", "*lvx", CODE_FOR_xlfii_load, B_UID(482) }; +static const struct builtin B14_vec_lvx = { { &T_int, &T_const_vec_p16_ptr, NULL, }, "ii", &T_vec_p16, 2, TRUE, FALSE, 0, "vec_lvx:14", "*lvx", CODE_FOR_xlfii_load, B_UID(483) }; +static const struct builtin B15_vec_lvx = { { &T_int, &T_const_vec_s16_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvx:15", "*lvx", CODE_FOR_xlfii_load, B_UID(484) }; +static const struct builtin B16_vec_lvx = { { &T_int, &T_const_vec_s32_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvx:16", "*lvx", CODE_FOR_xlfii_load, B_UID(485) }; +static const struct builtin B17_vec_lvx = { { &T_int, &T_const_vec_s8_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvx:17", "*lvx", CODE_FOR_xlfii_load, B_UID(486) }; +static const struct builtin B18_vec_lvx = { { &T_int, &T_const_vec_u16_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvx:18", "*lvx", CODE_FOR_xlfii_load, B_UID(487) }; +static const struct builtin B19_vec_lvx = { { &T_int, &T_const_vec_u32_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvx:19", "*lvx", CODE_FOR_xlfii_load, B_UID(488) }; +static const struct builtin B20_vec_lvx = { { &T_int, &T_const_vec_u8_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvx:20", "*lvx", CODE_FOR_xlfii_load, B_UID(489) }; +static const struct builtin B1_vec_lvewx = { { &T_int, &T_const_float_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvewx:1", "*lvewx", CODE_FOR_xlfii_load, B_UID(490) }; +static const struct builtin B2_vec_lvewx = { { &T_int, &T_const_int_ptr, NULL, }, "ii", &T_vec_s32, 2, 
TRUE, FALSE, 0, "vec_lvewx:2", "*lvewx", CODE_FOR_xlfii_load, B_UID(491) }; +static const struct builtin B3_vec_lvewx = { { &T_int, &T_const_long_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvewx:3", "*lvewx", CODE_FOR_xlfii_load, B_UID(492) }; +static const struct builtin B1_vec_lvehx = { { &T_int, &T_const_short_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvehx:1", "*lvehx", CODE_FOR_xlfii_load, B_UID(493) }; +static const struct builtin B1_vec_lvebx = { { &T_int, &T_const_signed_char_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvebx:1", "*lvebx", CODE_FOR_xlfii_load, B_UID(494) }; +static const struct builtin B2_vec_lvebx = { { &T_int, &T_const_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvebx:2", "*lvebx", CODE_FOR_xlfii_load, B_UID(495) }; +static const struct builtin B4_vec_lvewx = { { &T_int, &T_const_unsigned_int_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvewx:4", "*lvewx", CODE_FOR_xlfii_load, B_UID(496) }; +static const struct builtin B5_vec_lvewx = { { &T_int, &T_const_unsigned_long_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvewx:5", "*lvewx", CODE_FOR_xlfii_load, B_UID(497) }; +static const struct builtin B2_vec_lvehx = { { &T_int, &T_const_unsigned_short_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvehx:2", "*lvehx", CODE_FOR_xlfii_load, B_UID(498) }; +static const struct builtin B1_vec_lvxl = { { &T_int, &T_const_float_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvxl:1", "*lvxl", CODE_FOR_xlfii_load, B_UID(499) }; +static const struct builtin B2_vec_lvxl = { { &T_int, &T_const_int_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvxl:2", "*lvxl", CODE_FOR_xlfii_load, B_UID(500) }; +static const struct builtin B3_vec_lvxl = { { &T_int, &T_const_long_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvxl:3", "*lvxl", CODE_FOR_xlfii_load, B_UID(501) }; +static const struct builtin B4_vec_lvxl = { { &T_int, &T_const_short_ptr, NULL, 
}, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvxl:4", "*lvxl", CODE_FOR_xlfii_load, B_UID(502) }; +static const struct builtin B5_vec_lvxl = { { &T_int, &T_const_signed_char_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvxl:5", "*lvxl", CODE_FOR_xlfii_load, B_UID(503) }; +static const struct builtin B6_vec_lvxl = { { &T_int, &T_const_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvxl:6", "*lvxl", CODE_FOR_xlfii_load, B_UID(504) }; +static const struct builtin B7_vec_lvxl = { { &T_int, &T_const_unsigned_int_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvxl:7", "*lvxl", CODE_FOR_xlfii_load, B_UID(505) }; +static const struct builtin B8_vec_lvxl = { { &T_int, &T_const_unsigned_long_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvxl:8", "*lvxl", CODE_FOR_xlfii_load, B_UID(506) }; +static const struct builtin B9_vec_lvxl = { { &T_int, &T_const_unsigned_short_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvxl:9", "*lvxl", CODE_FOR_xlfii_load, B_UID(507) }; +static const struct builtin B10_vec_lvxl = { { &T_int, &T_const_vec_b16_ptr, NULL, }, "ii", &T_vec_b16, 2, TRUE, FALSE, 0, "vec_lvxl:10", "*lvxl", CODE_FOR_xlfii_load, B_UID(508) }; +static const struct builtin B11_vec_lvxl = { { &T_int, &T_const_vec_b32_ptr, NULL, }, "ii", &T_vec_b32, 2, TRUE, FALSE, 0, "vec_lvxl:11", "*lvxl", CODE_FOR_xlfii_load, B_UID(509) }; +static const struct builtin B12_vec_lvxl = { { &T_int, &T_const_vec_b8_ptr, NULL, }, "ii", &T_vec_b8, 2, TRUE, FALSE, 0, "vec_lvxl:12", "*lvxl", CODE_FOR_xlfii_load, B_UID(510) }; +static const struct builtin B13_vec_lvxl = { { &T_int, &T_const_vec_f32_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvxl:13", "*lvxl", CODE_FOR_xlfii_load, B_UID(511) }; +static const struct builtin B14_vec_lvxl = { { &T_int, &T_const_vec_p16_ptr, NULL, }, "ii", &T_vec_p16, 2, TRUE, FALSE, 0, "vec_lvxl:14", "*lvxl", CODE_FOR_xlfii_load, B_UID(512) }; +static const struct builtin B15_vec_lvxl = { { &T_int, 
&T_const_vec_s16_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvxl:15", "*lvxl", CODE_FOR_xlfii_load, B_UID(513) }; +static const struct builtin B16_vec_lvxl = { { &T_int, &T_const_vec_s32_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvxl:16", "*lvxl", CODE_FOR_xlfii_load, B_UID(514) }; +static const struct builtin B17_vec_lvxl = { { &T_int, &T_const_vec_s8_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvxl:17", "*lvxl", CODE_FOR_xlfii_load, B_UID(515) }; +static const struct builtin B18_vec_lvxl = { { &T_int, &T_const_vec_u16_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvxl:18", "*lvxl", CODE_FOR_xlfii_load, B_UID(516) }; +static const struct builtin B19_vec_lvxl = { { &T_int, &T_const_vec_u32_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvxl:19", "*lvxl", CODE_FOR_xlfii_load, B_UID(517) }; +static const struct builtin B20_vec_lvxl = { { &T_int, &T_const_vec_u8_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvxl:20", "*lvxl", CODE_FOR_xlfii_load, B_UID(518) }; +static const struct builtin B_vec_vlogefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vlogefp", "*vlogefp", CODE_FOR_xfx_fp, B_UID(519) }; +static const struct builtin B1_vec_lvsl = { { &T_int, &T_const_volatile_float_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:1", "*lvsl", CODE_FOR_xfii_load, B_UID(520) }; +static const struct builtin B2_vec_lvsl = { { &T_int, &T_const_volatile_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:2", "*lvsl", CODE_FOR_xfii_load, B_UID(521) }; +static const struct builtin B3_vec_lvsl = { { &T_int, &T_const_volatile_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:3", "*lvsl", CODE_FOR_xfii_load, B_UID(522) }; +static const struct builtin B4_vec_lvsl = { { &T_int, &T_const_volatile_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:4", "*lvsl", CODE_FOR_xfii_load, B_UID(523) }; +static const struct builtin B5_vec_lvsl = { { 
&T_int, &T_const_volatile_signed_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:5", "*lvsl", CODE_FOR_xfii_load, B_UID(524) }; +static const struct builtin B6_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:6", "*lvsl", CODE_FOR_xfii_load, B_UID(525) }; +static const struct builtin B7_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:7", "*lvsl", CODE_FOR_xfii_load, B_UID(526) }; +static const struct builtin B8_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:8", "*lvsl", CODE_FOR_xfii_load, B_UID(527) }; +static const struct builtin B9_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:9", "*lvsl", CODE_FOR_xfii_load, B_UID(528) }; +static const struct builtin B1_vec_lvsr = { { &T_int, &T_const_volatile_float_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:1", "*lvsr", CODE_FOR_xfii_load, B_UID(529) }; +static const struct builtin B2_vec_lvsr = { { &T_int, &T_const_volatile_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:2", "*lvsr", CODE_FOR_xfii_load, B_UID(530) }; +static const struct builtin B3_vec_lvsr = { { &T_int, &T_const_volatile_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:3", "*lvsr", CODE_FOR_xfii_load, B_UID(531) }; +static const struct builtin B4_vec_lvsr = { { &T_int, &T_const_volatile_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:4", "*lvsr", CODE_FOR_xfii_load, B_UID(532) }; +static const struct builtin B5_vec_lvsr = { { &T_int, &T_const_volatile_signed_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:5", "*lvsr", CODE_FOR_xfii_load, B_UID(533) }; +static const struct builtin B6_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:6", 
"*lvsr", CODE_FOR_xfii_load, B_UID(534) }; +static const struct builtin B7_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:7", "*lvsr", CODE_FOR_xfii_load, B_UID(535) }; +static const struct builtin B8_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:8", "*lvsr", CODE_FOR_xfii_load, B_UID(536) }; +static const struct builtin B9_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:9", "*lvsr", CODE_FOR_xfii_load, B_UID(537) }; +static const struct builtin B_vec_vmaddfp = { { &T_vec_f32, &T_vec_f32, &T_vec_f32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vmaddfp", "*vmaddfp", CODE_FOR_xfxxx_fp, B_UID(538) }; +static const struct builtin B_vec_vmhaddshs = { { &T_vec_s16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmhaddshs", "*vmhaddshs", CODE_FOR_xfxxx_complex, B_UID(539) }; +static const struct builtin B1_vec_vmaxsh = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vmaxsh:1", "*vmaxsh", CODE_FOR_xfxx_simple, B_UID(540) }; +static const struct builtin B1_vec_vmaxuh = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vmaxuh:1", "*vmaxuh", CODE_FOR_xfxx_simple, B_UID(541) }; +static const struct builtin B1_vec_vmaxsw = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vmaxsw:1", "*vmaxsw", CODE_FOR_xfxx_simple, B_UID(542) }; +static const struct builtin B1_vec_vmaxuw = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vmaxuw:1", "*vmaxuw", CODE_FOR_xfxx_simple, B_UID(543) }; +static const struct builtin B1_vec_vmaxsb = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vmaxsb:1", "*vmaxsb", CODE_FOR_xfxx_simple, B_UID(544) }; +static const struct builtin B1_vec_vmaxub = { { &T_vec_b8, &T_vec_u8, NULL, }, 
"xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vmaxub:1", "*vmaxub", CODE_FOR_xfxx_simple, B_UID(545) }; +static const struct builtin B_vec_vmaxfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vmaxfp", "*vmaxfp", CODE_FOR_xfxx_simple, B_UID(546) }; +static const struct builtin B2_vec_vmaxsh = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vmaxsh:2", "*vmaxsh", CODE_FOR_xfxx_simple, B_UID(547) }; +static const struct builtin B3_vec_vmaxsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vmaxsh:3", "*vmaxsh", CODE_FOR_xfxx_simple, B_UID(548) }; +static const struct builtin B2_vec_vmaxsw = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vmaxsw:2", "*vmaxsw", CODE_FOR_xfxx_simple, B_UID(549) }; +static const struct builtin B3_vec_vmaxsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vmaxsw:3", "*vmaxsw", CODE_FOR_xfxx_simple, B_UID(550) }; +static const struct builtin B2_vec_vmaxsb = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vmaxsb:2", "*vmaxsb", CODE_FOR_xfxx_simple, B_UID(551) }; +static const struct builtin B3_vec_vmaxsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vmaxsb:3", "*vmaxsb", CODE_FOR_xfxx_simple, B_UID(552) }; +static const struct builtin B2_vec_vmaxuh = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vmaxuh:2", "*vmaxuh", CODE_FOR_xfxx_simple, B_UID(553) }; +static const struct builtin B3_vec_vmaxuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vmaxuh:3", "*vmaxuh", CODE_FOR_xfxx_simple, B_UID(554) }; +static const struct builtin B2_vec_vmaxuw = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vmaxuw:2", "*vmaxuw", CODE_FOR_xfxx_simple, B_UID(555) }; +static const struct builtin B3_vec_vmaxuw = { { &T_vec_u32, &T_vec_u32, NULL, 
}, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vmaxuw:3", "*vmaxuw", CODE_FOR_xfxx_simple, B_UID(556) }; +static const struct builtin B2_vec_vmaxub = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vmaxub:2", "*vmaxub", CODE_FOR_xfxx_simple, B_UID(557) }; +static const struct builtin B3_vec_vmaxub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vmaxub:3", "*vmaxub", CODE_FOR_xfxx_simple, B_UID(558) }; +static const struct builtin B1_vec_vmrghh = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vmrghh:1", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(559) }; +static const struct builtin B1_vec_vmrghw = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vmrghw:1", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(560) }; +static const struct builtin B1_vec_vmrghb = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vmrghb:1", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(561) }; +static const struct builtin B2_vec_vmrghw = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vmrghw:2", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(562) }; +static const struct builtin B2_vec_vmrghh = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vmrghh:2", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(563) }; +static const struct builtin B3_vec_vmrghh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmrghh:3", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(564) }; +static const struct builtin B3_vec_vmrghw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmrghw:3", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(565) }; +static const struct builtin B2_vec_vmrghb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vmrghb:2", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(566) }; +static const struct builtin B4_vec_vmrghh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", 
&T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmrghh:4", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(567) }; +static const struct builtin B4_vec_vmrghw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmrghw:4", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(568) }; +static const struct builtin B3_vec_vmrghb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vmrghb:3", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(569) }; +static const struct builtin B1_vec_vmrglh = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vmrglh:1", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(570) }; +static const struct builtin B1_vec_vmrglw = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vmrglw:1", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(571) }; +static const struct builtin B1_vec_vmrglb = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vmrglb:1", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(572) }; +static const struct builtin B2_vec_vmrglw = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vmrglw:2", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(573) }; +static const struct builtin B2_vec_vmrglh = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vmrglh:2", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(574) }; +static const struct builtin B3_vec_vmrglh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmrglh:3", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(575) }; +static const struct builtin B3_vec_vmrglw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmrglw:3", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(576) }; +static const struct builtin B2_vec_vmrglb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vmrglb:2", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(577) }; +static const struct builtin B4_vec_vmrglh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, 
FALSE, FALSE, 0, "vec_vmrglh:4", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(578) }; +static const struct builtin B4_vec_vmrglw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmrglw:4", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(579) }; +static const struct builtin B3_vec_vmrglb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vmrglb:3", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(580) }; +static const struct builtin B_vec_mfvscr = { { NULL, NULL, NULL, }, "", &T_volatile_vec_u16, 0, FALSE, FALSE, 0, "vec_mfvscr", "*mfvscr", CODE_FOR_vxf_fxu, B_UID(581) }; +static const struct builtin B1_vec_vminsh = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vminsh:1", "*vminsh", CODE_FOR_xfxx_simple, B_UID(582) }; +static const struct builtin B1_vec_vminuh = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vminuh:1", "*vminuh", CODE_FOR_xfxx_simple, B_UID(583) }; +static const struct builtin B1_vec_vminsw = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vminsw:1", "*vminsw", CODE_FOR_xfxx_simple, B_UID(584) }; +static const struct builtin B1_vec_vminuw = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vminuw:1", "*vminuw", CODE_FOR_xfxx_simple, B_UID(585) }; +static const struct builtin B1_vec_vminsb = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vminsb:1", "*vminsb", CODE_FOR_xfxx_simple, B_UID(586) }; +static const struct builtin B1_vec_vminub = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vminub:1", "*vminub", CODE_FOR_xfxx_simple, B_UID(587) }; +static const struct builtin B_vec_vminfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vminfp", "*vminfp", CODE_FOR_xfxx_simple, B_UID(588) }; +static const struct builtin B2_vec_vminsh = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, 
"vec_vminsh:2", "*vminsh", CODE_FOR_xfxx_simple, B_UID(589) }; +static const struct builtin B3_vec_vminsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vminsh:3", "*vminsh", CODE_FOR_xfxx_simple, B_UID(590) }; +static const struct builtin B2_vec_vminsw = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vminsw:2", "*vminsw", CODE_FOR_xfxx_simple, B_UID(591) }; +static const struct builtin B3_vec_vminsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vminsw:3", "*vminsw", CODE_FOR_xfxx_simple, B_UID(592) }; +static const struct builtin B2_vec_vminsb = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vminsb:2", "*vminsb", CODE_FOR_xfxx_simple, B_UID(593) }; +static const struct builtin B3_vec_vminsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vminsb:3", "*vminsb", CODE_FOR_xfxx_simple, B_UID(594) }; +static const struct builtin B2_vec_vminuh = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vminuh:2", "*vminuh", CODE_FOR_xfxx_simple, B_UID(595) }; +static const struct builtin B3_vec_vminuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vminuh:3", "*vminuh", CODE_FOR_xfxx_simple, B_UID(596) }; +static const struct builtin B2_vec_vminuw = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vminuw:2", "*vminuw", CODE_FOR_xfxx_simple, B_UID(597) }; +static const struct builtin B3_vec_vminuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vminuw:3", "*vminuw", CODE_FOR_xfxx_simple, B_UID(598) }; +static const struct builtin B2_vec_vminub = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vminub:2", "*vminub", CODE_FOR_xfxx_simple, B_UID(599) }; +static const struct builtin B3_vec_vminub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 
2, "vec_vminub:3", "*vminub", CODE_FOR_xfxx_simple, B_UID(600) }; +static const struct builtin B1_vec_vmladduhm = { { &T_vec_s16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmladduhm:1", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(601) }; +static const struct builtin B2_vec_vmladduhm = { { &T_vec_s16, &T_vec_u16, &T_vec_u16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmladduhm:2", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(602) }; +static const struct builtin B3_vec_vmladduhm = { { &T_vec_u16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmladduhm:3", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(603) }; +static const struct builtin B4_vec_vmladduhm = { { &T_vec_u16, &T_vec_u16, &T_vec_u16, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vmladduhm:4", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(604) }; +static const struct builtin B_vec_vmhraddshs = { { &T_vec_s16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmhraddshs", "*vmhraddshs", CODE_FOR_xfxxx_complex, B_UID(605) }; +static const struct builtin B_vec_vmsumshm = { { &T_vec_s16, &T_vec_s16, &T_vec_s32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vmsumshm", "*vmsumshm", CODE_FOR_xfxxx_complex, B_UID(606) }; +static const struct builtin B_vec_vmsummbm = { { &T_vec_s8, &T_vec_u8, &T_vec_s32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vmsummbm", "*vmsummbm", CODE_FOR_xfxxx_complex, B_UID(607) }; +static const struct builtin B_vec_vmsumuhm = { { &T_vec_u16, &T_vec_u16, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vmsumuhm", "*vmsumuhm", CODE_FOR_xfxxx_complex, B_UID(608) }; +static const struct builtin B_vec_vmsumubm = { { &T_vec_u8, &T_vec_u8, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vmsumubm", "*vmsumubm", CODE_FOR_xfxxx_complex, B_UID(609) }; +static const struct builtin B_vec_vmsumshs = { { &T_vec_s16, &T_vec_s16, &T_vec_s32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vmsumshs", 
"*vmsumshs", CODE_FOR_xfxxx_complex, B_UID(610) }; +static const struct builtin B_vec_vmsumuhs = { { &T_vec_u16, &T_vec_u16, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vmsumuhs", "*vmsumuhs", CODE_FOR_xfxxx_complex, B_UID(611) }; +static const struct builtin B1_vec_mtvscr = { { &T_vec_b16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:1", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(612) }; +static const struct builtin B2_vec_mtvscr = { { &T_vec_b32, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:2", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(613) }; +static const struct builtin B3_vec_mtvscr = { { &T_vec_b8, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:3", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(614) }; +static const struct builtin B4_vec_mtvscr = { { &T_vec_p16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:4", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(615) }; +static const struct builtin B5_vec_mtvscr = { { &T_vec_s16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:5", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(616) }; +static const struct builtin B6_vec_mtvscr = { { &T_vec_s32, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:6", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(617) }; +static const struct builtin B7_vec_mtvscr = { { &T_vec_s8, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:7", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(618) }; +static const struct builtin B8_vec_mtvscr = { { &T_vec_u16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:8", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(619) }; +static const struct builtin B9_vec_mtvscr = { { &T_vec_u32, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:9", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(620) }; +static const struct builtin B10_vec_mtvscr = { { &T_vec_u8, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:10", "*mtvscr", 
CODE_FOR_vfx_fxu, B_UID(621) }; +static const struct builtin B_vec_vmulesh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmulesh", "*vmulesh", CODE_FOR_xfxx_complex, B_UID(622) }; +static const struct builtin B_vec_vmulesb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmulesb", "*vmulesb", CODE_FOR_xfxx_complex, B_UID(623) }; +static const struct builtin B_vec_vmuleuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmuleuh", "*vmuleuh", CODE_FOR_xfxx_complex, B_UID(624) }; +static const struct builtin B_vec_vmuleub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmuleub", "*vmuleub", CODE_FOR_xfxx_complex, B_UID(625) }; +static const struct builtin B_vec_vmulosh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmulosh", "*vmulosh", CODE_FOR_xfxx_complex, B_UID(626) }; +static const struct builtin B_vec_vmulosb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmulosb", "*vmulosb", CODE_FOR_xfxx_complex, B_UID(627) }; +static const struct builtin B_vec_vmulouh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmulouh", "*vmulouh", CODE_FOR_xfxx_complex, B_UID(628) }; +static const struct builtin B_vec_vmuloub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmuloub", "*vmuloub", CODE_FOR_xfxx_complex, B_UID(629) }; +static const struct builtin B_vec_vnmsubfp = { { &T_vec_f32, &T_vec_f32, &T_vec_f32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vnmsubfp", "*vnmsubfp", CODE_FOR_xfxxx_fp, B_UID(630) }; +static const struct builtin B1_vec_vnor = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vnor:1", "*vnor", CODE_FOR_xfxx_simple, B_UID(631) }; +static const struct builtin B2_vec_vnor = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vnor:2", 
"*vnor", CODE_FOR_xfxx_simple, B_UID(632) }; +static const struct builtin B3_vec_vnor = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vnor:3", "*vnor", CODE_FOR_xfxx_simple, B_UID(633) }; +static const struct builtin B4_vec_vnor = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vnor:4", "*vnor", CODE_FOR_xfxx_simple, B_UID(634) }; +static const struct builtin B5_vec_vnor = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vnor:5", "*vnor", CODE_FOR_xfxx_simple, B_UID(635) }; +static const struct builtin B6_vec_vnor = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vnor:6", "*vnor", CODE_FOR_xfxx_simple, B_UID(636) }; +static const struct builtin B7_vec_vnor = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vnor:7", "*vnor", CODE_FOR_xfxx_simple, B_UID(637) }; +static const struct builtin B8_vec_vnor = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vnor:8", "*vnor", CODE_FOR_xfxx_simple, B_UID(638) }; +static const struct builtin B9_vec_vnor = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vnor:9", "*vnor", CODE_FOR_xfxx_simple, B_UID(639) }; +static const struct builtin B10_vec_vnor = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vnor:10", "*vnor", CODE_FOR_xfxx_simple, B_UID(640) }; +static const struct builtin B1_vec_vor = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 2, "vec_vor:1", "*vor", CODE_FOR_xfxx_simple, B_UID(641) }; +static const struct builtin B2_vec_vor = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vor:2", "*vor", CODE_FOR_xfxx_simple, B_UID(642) }; +static const struct builtin B3_vec_vor = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vor:3", "*vor", CODE_FOR_xfxx_simple, B_UID(643) }; +static const struct 
builtin B4_vec_vor = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 2, "vec_vor:4", "*vor", CODE_FOR_xfxx_simple, B_UID(644) }; +static const struct builtin B5_vec_vor = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vor:5", "*vor", CODE_FOR_xfxx_simple, B_UID(645) }; +static const struct builtin B6_vec_vor = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vor:6", "*vor", CODE_FOR_xfxx_simple, B_UID(646) }; +static const struct builtin B7_vec_vor = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vor:7", "*vor", CODE_FOR_xfxx_simple, B_UID(647) }; +static const struct builtin B8_vec_vor = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 2, "vec_vor:8", "*vor", CODE_FOR_xfxx_simple, B_UID(648) }; +static const struct builtin B9_vec_vor = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vor:9", "*vor", CODE_FOR_xfxx_simple, B_UID(649) }; +static const struct builtin B10_vec_vor = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vor:10", "*vor", CODE_FOR_xfxx_simple, B_UID(650) }; +static const struct builtin B11_vec_vor = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vor:11", "*vor", CODE_FOR_xfxx_simple, B_UID(651) }; +static const struct builtin B12_vec_vor = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vor:12", "*vor", CODE_FOR_xfxx_simple, B_UID(652) }; +static const struct builtin B13_vec_vor = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vor:13", "*vor", CODE_FOR_xfxx_simple, B_UID(653) }; +static const struct builtin B14_vec_vor = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vor:14", "*vor", CODE_FOR_xfxx_simple, B_UID(654) }; +static const struct builtin B15_vec_vor = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, 
FALSE, FALSE, 2, "vec_vor:15", "*vor", CODE_FOR_xfxx_simple, B_UID(655) }; +static const struct builtin B16_vec_vor = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vor:16", "*vor", CODE_FOR_xfxx_simple, B_UID(656) }; +static const struct builtin B17_vec_vor = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vor:17", "*vor", CODE_FOR_xfxx_simple, B_UID(657) }; +static const struct builtin B18_vec_vor = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vor:18", "*vor", CODE_FOR_xfxx_simple, B_UID(658) }; +static const struct builtin B19_vec_vor = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vor:19", "*vor", CODE_FOR_xfxx_simple, B_UID(659) }; +static const struct builtin B20_vec_vor = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vor:20", "*vor", CODE_FOR_xfxx_simple, B_UID(660) }; +static const struct builtin B21_vec_vor = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vor:21", "*vor", CODE_FOR_xfxx_simple, B_UID(661) }; +static const struct builtin B22_vec_vor = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vor:22", "*vor", CODE_FOR_xfxx_simple, B_UID(662) }; +static const struct builtin B23_vec_vor = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vor:23", "*vor", CODE_FOR_xfxx_simple, B_UID(663) }; +static const struct builtin B24_vec_vor = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vor:24", "*vor", CODE_FOR_xfxx_simple, B_UID(664) }; +static const struct builtin B1_vec_vpkuhum = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vpkuhum:1", "*vpkuhum", CODE_FOR_xfxx_perm, B_UID(665) }; +static const struct builtin B1_vec_vpkuwum = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vpkuwum:1", "*vpkuwum", CODE_FOR_xfxx_perm, 
B_UID(666) }; +static const struct builtin B2_vec_vpkuhum = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vpkuhum:2", "*vpkuhum", CODE_FOR_xfxx_perm, B_UID(667) }; +static const struct builtin B2_vec_vpkuwum = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vpkuwum:2", "*vpkuwum", CODE_FOR_xfxx_perm, B_UID(668) }; +static const struct builtin B3_vec_vpkuhum = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vpkuhum:3", "*vpkuhum", CODE_FOR_xfxx_perm, B_UID(669) }; +static const struct builtin B3_vec_vpkuwum = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vpkuwum:3", "*vpkuwum", CODE_FOR_xfxx_perm, B_UID(670) }; +static const struct builtin B_vec_vpkpx = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vpkpx", "*vpkpx", CODE_FOR_xfxx_perm, B_UID(671) }; +static const struct builtin B_vec_vpkshss = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vpkshss", "*vpkshss", CODE_FOR_xfxx_perm, B_UID(672) }; +static const struct builtin B_vec_vpkswss = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vpkswss", "*vpkswss", CODE_FOR_xfxx_perm, B_UID(673) }; +static const struct builtin B_vec_vpkuhus = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vpkuhus", "*vpkuhus", CODE_FOR_xfxx_perm, B_UID(674) }; +static const struct builtin B_vec_vpkuwus = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vpkuwus", "*vpkuwus", CODE_FOR_xfxx_perm, B_UID(675) }; +static const struct builtin B_vec_vpkshus = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vpkshus", "*vpkshus", CODE_FOR_xfxx_perm, B_UID(676) }; +static const struct builtin B_vec_vpkswus = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vpkswus", "*vpkswus", CODE_FOR_xfxx_perm, 
B_UID(677) }; +static const struct builtin B1_vec_vperm = { { &T_vec_b16, &T_vec_b16, &T_vec_u8, }, "xxx", &T_vec_b16, 3, FALSE, FALSE, 0, "vec_vperm:1", "*vperm", CODE_FOR_xfxxx_perm, B_UID(678) }; +static const struct builtin B2_vec_vperm = { { &T_vec_b32, &T_vec_b32, &T_vec_u8, }, "xxx", &T_vec_b32, 3, FALSE, FALSE, 0, "vec_vperm:2", "*vperm", CODE_FOR_xfxxx_perm, B_UID(679) }; +static const struct builtin B3_vec_vperm = { { &T_vec_b8, &T_vec_b8, &T_vec_u8, }, "xxx", &T_vec_b8, 3, FALSE, FALSE, 0, "vec_vperm:3", "*vperm", CODE_FOR_xfxxx_perm, B_UID(680) }; +static const struct builtin B4_vec_vperm = { { &T_vec_f32, &T_vec_f32, &T_vec_u8, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vperm:4", "*vperm", CODE_FOR_xfxxx_perm, B_UID(681) }; +static const struct builtin B5_vec_vperm = { { &T_vec_p16, &T_vec_p16, &T_vec_u8, }, "xxx", &T_vec_p16, 3, FALSE, FALSE, 0, "vec_vperm:5", "*vperm", CODE_FOR_xfxxx_perm, B_UID(682) }; +static const struct builtin B6_vec_vperm = { { &T_vec_s16, &T_vec_s16, &T_vec_u8, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vperm:6", "*vperm", CODE_FOR_xfxxx_perm, B_UID(683) }; +static const struct builtin B7_vec_vperm = { { &T_vec_s32, &T_vec_s32, &T_vec_u8, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vperm:7", "*vperm", CODE_FOR_xfxxx_perm, B_UID(684) }; +static const struct builtin B8_vec_vperm = { { &T_vec_s8, &T_vec_s8, &T_vec_u8, }, "xxx", &T_vec_s8, 3, FALSE, FALSE, 0, "vec_vperm:8", "*vperm", CODE_FOR_xfxxx_perm, B_UID(685) }; +static const struct builtin B9_vec_vperm = { { &T_vec_u16, &T_vec_u16, &T_vec_u8, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vperm:9", "*vperm", CODE_FOR_xfxxx_perm, B_UID(686) }; +static const struct builtin B10_vec_vperm = { { &T_vec_u32, &T_vec_u32, &T_vec_u8, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vperm:10", "*vperm", CODE_FOR_xfxxx_perm, B_UID(687) }; +static const struct builtin B11_vec_vperm = { { &T_vec_u8, &T_vec_u8, &T_vec_u8, }, "xxx", &T_vec_u8, 3, FALSE, FALSE, 0, "vec_vperm:11", 
"*vperm", CODE_FOR_xfxxx_perm, B_UID(688) }; +static const struct builtin B_vec_vrefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrefp", "*vrefp", CODE_FOR_xfx_fp, B_UID(689) }; +static const struct builtin B1_vec_vrlh = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vrlh:1", "*vrlh", CODE_FOR_xfxx_simple, B_UID(690) }; +static const struct builtin B1_vec_vrlw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vrlw:1", "*vrlw", CODE_FOR_xfxx_simple, B_UID(691) }; +static const struct builtin B1_vec_vrlb = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vrlb:1", "*vrlb", CODE_FOR_xfxx_simple, B_UID(692) }; +static const struct builtin B2_vec_vrlh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vrlh:2", "*vrlh", CODE_FOR_xfxx_simple, B_UID(693) }; +static const struct builtin B2_vec_vrlw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vrlw:2", "*vrlw", CODE_FOR_xfxx_simple, B_UID(694) }; +static const struct builtin B2_vec_vrlb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vrlb:2", "*vrlb", CODE_FOR_xfxx_simple, B_UID(695) }; +static const struct builtin B_vec_vrfin = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfin", "*vrfin", CODE_FOR_xfx_fp, B_UID(696) }; +static const struct builtin B_vec_vrsqrtefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrsqrtefp", "*vrsqrtefp", CODE_FOR_xfx_fp, B_UID(697) }; +static const struct builtin B1_vec_vsel = { { &T_vec_b16, &T_vec_b16, &T_vec_b16, }, "xxx", &T_vec_b16, 3, FALSE, FALSE, 0, "vec_vsel:1", "*vsel", CODE_FOR_xfxxx_simple, B_UID(698) }; +static const struct builtin B2_vec_vsel = { { &T_vec_b16, &T_vec_b16, &T_vec_u16, }, "xxx", &T_vec_b16, 3, FALSE, FALSE, 0, "vec_vsel:2", "*vsel", CODE_FOR_xfxxx_simple, B_UID(699) }; +static const struct 
builtin B3_vec_vsel = { { &T_vec_b32, &T_vec_b32, &T_vec_b32, }, "xxx", &T_vec_b32, 3, FALSE, FALSE, 0, "vec_vsel:3", "*vsel", CODE_FOR_xfxxx_simple, B_UID(700) }; +static const struct builtin B4_vec_vsel = { { &T_vec_b32, &T_vec_b32, &T_vec_u32, }, "xxx", &T_vec_b32, 3, FALSE, FALSE, 0, "vec_vsel:4", "*vsel", CODE_FOR_xfxxx_simple, B_UID(701) }; +static const struct builtin B5_vec_vsel = { { &T_vec_b8, &T_vec_b8, &T_vec_b8, }, "xxx", &T_vec_b8, 3, FALSE, FALSE, 0, "vec_vsel:5", "*vsel", CODE_FOR_xfxxx_simple, B_UID(702) }; +static const struct builtin B6_vec_vsel = { { &T_vec_b8, &T_vec_b8, &T_vec_u8, }, "xxx", &T_vec_b8, 3, FALSE, FALSE, 0, "vec_vsel:6", "*vsel", CODE_FOR_xfxxx_simple, B_UID(703) }; +static const struct builtin B7_vec_vsel = { { &T_vec_f32, &T_vec_f32, &T_vec_b32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vsel:7", "*vsel", CODE_FOR_xfxxx_simple, B_UID(704) }; +static const struct builtin B8_vec_vsel = { { &T_vec_f32, &T_vec_f32, &T_vec_u32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vsel:8", "*vsel", CODE_FOR_xfxxx_simple, B_UID(705) }; +static const struct builtin B9_vec_vsel = { { &T_vec_s16, &T_vec_s16, &T_vec_b16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vsel:9", "*vsel", CODE_FOR_xfxxx_simple, B_UID(706) }; +static const struct builtin B10_vec_vsel = { { &T_vec_s16, &T_vec_s16, &T_vec_u16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vsel:10", "*vsel", CODE_FOR_xfxxx_simple, B_UID(707) }; +static const struct builtin B11_vec_vsel = { { &T_vec_s32, &T_vec_s32, &T_vec_b32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vsel:11", "*vsel", CODE_FOR_xfxxx_simple, B_UID(708) }; +static const struct builtin B12_vec_vsel = { { &T_vec_s32, &T_vec_s32, &T_vec_u32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vsel:12", "*vsel", CODE_FOR_xfxxx_simple, B_UID(709) }; +static const struct builtin B13_vec_vsel = { { &T_vec_s8, &T_vec_s8, &T_vec_b8, }, "xxx", &T_vec_s8, 3, FALSE, FALSE, 0, "vec_vsel:13", "*vsel", CODE_FOR_xfxxx_simple, 
B_UID(710) }; +static const struct builtin B14_vec_vsel = { { &T_vec_s8, &T_vec_s8, &T_vec_u8, }, "xxx", &T_vec_s8, 3, FALSE, FALSE, 0, "vec_vsel:14", "*vsel", CODE_FOR_xfxxx_simple, B_UID(711) }; +static const struct builtin B15_vec_vsel = { { &T_vec_u16, &T_vec_u16, &T_vec_b16, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vsel:15", "*vsel", CODE_FOR_xfxxx_simple, B_UID(712) }; +static const struct builtin B16_vec_vsel = { { &T_vec_u16, &T_vec_u16, &T_vec_u16, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vsel:16", "*vsel", CODE_FOR_xfxxx_simple, B_UID(713) }; +static const struct builtin B17_vec_vsel = { { &T_vec_u32, &T_vec_u32, &T_vec_b32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vsel:17", "*vsel", CODE_FOR_xfxxx_simple, B_UID(714) }; +static const struct builtin B18_vec_vsel = { { &T_vec_u32, &T_vec_u32, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vsel:18", "*vsel", CODE_FOR_xfxxx_simple, B_UID(715) }; +static const struct builtin B19_vec_vsel = { { &T_vec_u8, &T_vec_u8, &T_vec_b8, }, "xxx", &T_vec_u8, 3, FALSE, FALSE, 0, "vec_vsel:19", "*vsel", CODE_FOR_xfxxx_simple, B_UID(716) }; +static const struct builtin B20_vec_vsel = { { &T_vec_u8, &T_vec_u8, &T_vec_u8, }, "xxx", &T_vec_u8, 3, FALSE, FALSE, 0, "vec_vsel:20", "*vsel", CODE_FOR_xfxxx_simple, B_UID(717) }; +static const struct builtin B1_vec_vslh = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vslh:1", "*vslh", CODE_FOR_xfxx_simple, B_UID(718) }; +static const struct builtin B1_vec_vslw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vslw:1", "*vslw", CODE_FOR_xfxx_simple, B_UID(719) }; +static const struct builtin B1_vec_vslb = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vslb:1", "*vslb", CODE_FOR_xfxx_simple, B_UID(720) }; +static const struct builtin B2_vec_vslh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vslh:2", "*vslh", 
CODE_FOR_xfxx_simple, B_UID(721) }; +static const struct builtin B2_vec_vslw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vslw:2", "*vslw", CODE_FOR_xfxx_simple, B_UID(722) }; +static const struct builtin B2_vec_vslb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vslb:2", "*vslb", CODE_FOR_xfxx_simple, B_UID(723) }; +static const struct builtin B1_vec_vsldoi = { { &T_vec_b16, &T_vec_b16, &T_immed_u4, }, "xxC", &T_vec_b16, 3, FALSE, FALSE, 3, "vec_vsldoi:1", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(724) }; +static const struct builtin B2_vec_vsldoi = { { &T_vec_b32, &T_vec_b32, &T_immed_u4, }, "xxC", &T_vec_b32, 3, FALSE, FALSE, 3, "vec_vsldoi:2", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(725) }; +static const struct builtin B3_vec_vsldoi = { { &T_vec_b8, &T_vec_b8, &T_immed_u4, }, "xxC", &T_vec_b8, 3, FALSE, FALSE, 3, "vec_vsldoi:3", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(726) }; +static const struct builtin B4_vec_vsldoi = { { &T_vec_f32, &T_vec_f32, &T_immed_u4, }, "xxC", &T_vec_f32, 3, FALSE, FALSE, 3, "vec_vsldoi:4", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(727) }; +static const struct builtin B5_vec_vsldoi = { { &T_vec_p16, &T_vec_p16, &T_immed_u4, }, "xxC", &T_vec_p16, 3, FALSE, FALSE, 3, "vec_vsldoi:5", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(728) }; +static const struct builtin B6_vec_vsldoi = { { &T_vec_s16, &T_vec_s16, &T_immed_u4, }, "xxC", &T_vec_s16, 3, FALSE, FALSE, 3, "vec_vsldoi:6", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(729) }; +static const struct builtin B7_vec_vsldoi = { { &T_vec_s32, &T_vec_s32, &T_immed_u4, }, "xxC", &T_vec_s32, 3, FALSE, FALSE, 3, "vec_vsldoi:7", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(730) }; +static const struct builtin B8_vec_vsldoi = { { &T_vec_s8, &T_vec_s8, &T_immed_u4, }, "xxC", &T_vec_s8, 3, FALSE, FALSE, 3, "vec_vsldoi:8", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(731) }; +static const struct builtin B9_vec_vsldoi = { { &T_vec_u16, &T_vec_u16, &T_immed_u4, }, "xxC", 
&T_vec_u16, 3, FALSE, FALSE, 3, "vec_vsldoi:9", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(732) }; +static const struct builtin B10_vec_vsldoi = { { &T_vec_u32, &T_vec_u32, &T_immed_u4, }, "xxC", &T_vec_u32, 3, FALSE, FALSE, 3, "vec_vsldoi:10", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(733) }; +static const struct builtin B11_vec_vsldoi = { { &T_vec_u8, &T_vec_u8, &T_immed_u4, }, "xxC", &T_vec_u8, 3, FALSE, FALSE, 3, "vec_vsldoi:11", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(734) }; +static const struct builtin B1_vec_vsl = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsl:1", "*vsl", CODE_FOR_xfxx_simple, B_UID(735) }; +static const struct builtin B2_vec_vsl = { { &T_vec_b16, &T_vec_u32, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsl:2", "*vsl", CODE_FOR_xfxx_simple, B_UID(736) }; +static const struct builtin B3_vec_vsl = { { &T_vec_b16, &T_vec_u8, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsl:3", "*vsl", CODE_FOR_xfxx_simple, B_UID(737) }; +static const struct builtin B4_vec_vsl = { { &T_vec_b32, &T_vec_u16, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsl:4", "*vsl", CODE_FOR_xfxx_simple, B_UID(738) }; +static const struct builtin B5_vec_vsl = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsl:5", "*vsl", CODE_FOR_xfxx_simple, B_UID(739) }; +static const struct builtin B6_vec_vsl = { { &T_vec_b32, &T_vec_u8, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsl:6", "*vsl", CODE_FOR_xfxx_simple, B_UID(740) }; +static const struct builtin B7_vec_vsl = { { &T_vec_b8, &T_vec_u16, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsl:7", "*vsl", CODE_FOR_xfxx_simple, B_UID(741) }; +static const struct builtin B8_vec_vsl = { { &T_vec_b8, &T_vec_u32, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsl:8", "*vsl", CODE_FOR_xfxx_simple, B_UID(742) }; +static const struct builtin B9_vec_vsl = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsl:9", "*vsl", 
CODE_FOR_xfxx_simple, B_UID(743) }; +static const struct builtin B10_vec_vsl = { { &T_vec_p16, &T_vec_u16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsl:10", "*vsl", CODE_FOR_xfxx_simple, B_UID(744) }; +static const struct builtin B11_vec_vsl = { { &T_vec_p16, &T_vec_u32, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsl:11", "*vsl", CODE_FOR_xfxx_simple, B_UID(745) }; +static const struct builtin B12_vec_vsl = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsl:12", "*vsl", CODE_FOR_xfxx_simple, B_UID(746) }; +static const struct builtin B13_vec_vsl = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsl:13", "*vsl", CODE_FOR_xfxx_simple, B_UID(747) }; +static const struct builtin B14_vec_vsl = { { &T_vec_s16, &T_vec_u32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsl:14", "*vsl", CODE_FOR_xfxx_simple, B_UID(748) }; +static const struct builtin B15_vec_vsl = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsl:15", "*vsl", CODE_FOR_xfxx_simple, B_UID(749) }; +static const struct builtin B16_vec_vsl = { { &T_vec_s32, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsl:16", "*vsl", CODE_FOR_xfxx_simple, B_UID(750) }; +static const struct builtin B17_vec_vsl = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsl:17", "*vsl", CODE_FOR_xfxx_simple, B_UID(751) }; +static const struct builtin B18_vec_vsl = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsl:18", "*vsl", CODE_FOR_xfxx_simple, B_UID(752) }; +static const struct builtin B19_vec_vsl = { { &T_vec_s8, &T_vec_u16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsl:19", "*vsl", CODE_FOR_xfxx_simple, B_UID(753) }; +static const struct builtin B20_vec_vsl = { { &T_vec_s8, &T_vec_u32, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsl:20", "*vsl", CODE_FOR_xfxx_simple, B_UID(754) }; +static const struct builtin 
B21_vec_vsl = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsl:21", "*vsl", CODE_FOR_xfxx_simple, B_UID(755) }; +static const struct builtin B22_vec_vsl = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsl:22", "*vsl", CODE_FOR_xfxx_simple, B_UID(756) }; +static const struct builtin B23_vec_vsl = { { &T_vec_u16, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsl:23", "*vsl", CODE_FOR_xfxx_simple, B_UID(757) }; +static const struct builtin B24_vec_vsl = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsl:24", "*vsl", CODE_FOR_xfxx_simple, B_UID(758) }; +static const struct builtin B25_vec_vsl = { { &T_vec_u32, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsl:25", "*vsl", CODE_FOR_xfxx_simple, B_UID(759) }; +static const struct builtin B26_vec_vsl = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsl:26", "*vsl", CODE_FOR_xfxx_simple, B_UID(760) }; +static const struct builtin B27_vec_vsl = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsl:27", "*vsl", CODE_FOR_xfxx_simple, B_UID(761) }; +static const struct builtin B28_vec_vsl = { { &T_vec_u8, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsl:28", "*vsl", CODE_FOR_xfxx_simple, B_UID(762) }; +static const struct builtin B29_vec_vsl = { { &T_vec_u8, &T_vec_u32, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsl:29", "*vsl", CODE_FOR_xfxx_simple, B_UID(763) }; +static const struct builtin B30_vec_vsl = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsl:30", "*vsl", CODE_FOR_xfxx_simple, B_UID(764) }; +static const struct builtin B1_vec_vslo = { { &T_vec_f32, &T_vec_s8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vslo:1", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(765) }; +static const struct builtin B2_vec_vslo = { { &T_vec_f32, &T_vec_u8, NULL, }, "xx", &T_vec_f32, 2, 
FALSE, FALSE, 0, "vec_vslo:2", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(766) }; +static const struct builtin B3_vec_vslo = { { &T_vec_p16, &T_vec_s8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vslo:3", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(767) }; +static const struct builtin B4_vec_vslo = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vslo:4", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(768) }; +static const struct builtin B5_vec_vslo = { { &T_vec_s16, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vslo:5", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(769) }; +static const struct builtin B6_vec_vslo = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vslo:6", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(770) }; +static const struct builtin B7_vec_vslo = { { &T_vec_s32, &T_vec_s8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vslo:7", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(771) }; +static const struct builtin B8_vec_vslo = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vslo:8", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(772) }; +static const struct builtin B9_vec_vslo = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vslo:9", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(773) }; +static const struct builtin B10_vec_vslo = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vslo:10", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(774) }; +static const struct builtin B11_vec_vslo = { { &T_vec_u16, &T_vec_s8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vslo:11", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(775) }; +static const struct builtin B12_vec_vslo = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vslo:12", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(776) }; +static const struct builtin B13_vec_vslo = { { &T_vec_u32, &T_vec_s8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vslo:13", 
"*vslo", CODE_FOR_xfxx_perm_bug, B_UID(777) }; +static const struct builtin B14_vec_vslo = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vslo:14", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(778) }; +static const struct builtin B15_vec_vslo = { { &T_vec_u8, &T_vec_s8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vslo:15", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(779) }; +static const struct builtin B16_vec_vslo = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vslo:16", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(780) }; +static const struct builtin B1_vec_vsplth = { { &T_vec_b16, &T_immed_u5, NULL, }, "xB", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsplth:1", "*vsplth", CODE_FOR_xfxB_perm, B_UID(781) }; +static const struct builtin B1_vec_vspltw = { { &T_vec_b32, &T_immed_u5, NULL, }, "xB", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vspltw:1", "*vspltw", CODE_FOR_xfxB_perm, B_UID(782) }; +static const struct builtin B1_vec_vspltb = { { &T_vec_b8, &T_immed_u5, NULL, }, "xB", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vspltb:1", "*vspltb", CODE_FOR_xfxB_perm, B_UID(783) }; +static const struct builtin B2_vec_vspltw = { { &T_vec_f32, &T_immed_u5, NULL, }, "xB", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vspltw:2", "*vspltw", CODE_FOR_xfxB_perm, B_UID(784) }; +static const struct builtin B2_vec_vsplth = { { &T_vec_p16, &T_immed_u5, NULL, }, "xB", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsplth:2", "*vsplth", CODE_FOR_xfxB_perm, B_UID(785) }; +static const struct builtin B3_vec_vsplth = { { &T_vec_s16, &T_immed_u5, NULL, }, "xB", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsplth:3", "*vsplth", CODE_FOR_xfxB_perm, B_UID(786) }; +static const struct builtin B3_vec_vspltw = { { &T_vec_s32, &T_immed_u5, NULL, }, "xB", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vspltw:3", "*vspltw", CODE_FOR_xfxB_perm, B_UID(787) }; +static const struct builtin B2_vec_vspltb = { { &T_vec_s8, &T_immed_u5, NULL, }, "xB", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vspltb:2", 
"*vspltb", CODE_FOR_xfxB_perm, B_UID(788) }; +static const struct builtin B4_vec_vsplth = { { &T_vec_u16, &T_immed_u5, NULL, }, "xB", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsplth:4", "*vsplth", CODE_FOR_xfxB_perm, B_UID(789) }; +static const struct builtin B4_vec_vspltw = { { &T_vec_u32, &T_immed_u5, NULL, }, "xB", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vspltw:4", "*vspltw", CODE_FOR_xfxB_perm, B_UID(790) }; +static const struct builtin B3_vec_vspltb = { { &T_vec_u8, &T_immed_u5, NULL, }, "xB", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vspltb:3", "*vspltb", CODE_FOR_xfxB_perm, B_UID(791) }; +static const struct builtin B_vec_vspltish = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_s16, 1, FALSE, FALSE, 5, "vec_vspltish", "*vspltish", CODE_FOR_xfA_perm, B_UID(792) }; +static const struct builtin B_vec_vspltisw = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_s32, 1, FALSE, FALSE, 6, "vec_vspltisw", "*vspltisw", CODE_FOR_xfA_perm, B_UID(793) }; +static const struct builtin B_vec_vspltisb = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_s8, 1, FALSE, FALSE, 4, "vec_vspltisb", "*vspltisb", CODE_FOR_xfA_perm, B_UID(794) }; +static const struct builtin B_vec_splat_u16 = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_u16, 1, FALSE, FALSE, 5, "vec_splat_u16", "*vspltish", CODE_FOR_xfA_perm, B_UID(795) }; +static const struct builtin B_vec_splat_u32 = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_u32, 1, FALSE, FALSE, 6, "vec_splat_u32", "*vspltisw", CODE_FOR_xfA_perm, B_UID(796) }; +static const struct builtin B_vec_splat_u8 = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_u8, 1, FALSE, FALSE, 4, "vec_splat_u8", "*vspltisb", CODE_FOR_xfA_perm, B_UID(797) }; +static const struct builtin B1_vec_vsrh = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsrh:1", "*vsrh", CODE_FOR_xfxx_simple, B_UID(798) }; +static const struct builtin B1_vec_vsrw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsrw:1", "*vsrw", CODE_FOR_xfxx_simple, 
B_UID(799) }; +static const struct builtin B1_vec_vsrb = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsrb:1", "*vsrb", CODE_FOR_xfxx_simple, B_UID(800) }; +static const struct builtin B2_vec_vsrh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsrh:2", "*vsrh", CODE_FOR_xfxx_simple, B_UID(801) }; +static const struct builtin B2_vec_vsrw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsrw:2", "*vsrw", CODE_FOR_xfxx_simple, B_UID(802) }; +static const struct builtin B2_vec_vsrb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsrb:2", "*vsrb", CODE_FOR_xfxx_simple, B_UID(803) }; +static const struct builtin B1_vec_vsrah = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsrah:1", "*vsrah", CODE_FOR_xfxx_simple, B_UID(804) }; +static const struct builtin B1_vec_vsraw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsraw:1", "*vsraw", CODE_FOR_xfxx_simple, B_UID(805) }; +static const struct builtin B1_vec_vsrab = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsrab:1", "*vsrab", CODE_FOR_xfxx_simple, B_UID(806) }; +static const struct builtin B2_vec_vsrah = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsrah:2", "*vsrah", CODE_FOR_xfxx_simple, B_UID(807) }; +static const struct builtin B2_vec_vsraw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsraw:2", "*vsraw", CODE_FOR_xfxx_simple, B_UID(808) }; +static const struct builtin B2_vec_vsrab = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsrab:2", "*vsrab", CODE_FOR_xfxx_simple, B_UID(809) }; +static const struct builtin B1_vec_vsr = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsr:1", "*vsr", CODE_FOR_xfxx_simple, B_UID(810) }; +static const struct builtin 
B2_vec_vsr = { { &T_vec_b16, &T_vec_u32, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsr:2", "*vsr", CODE_FOR_xfxx_simple, B_UID(811) }; +static const struct builtin B3_vec_vsr = { { &T_vec_b16, &T_vec_u8, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsr:3", "*vsr", CODE_FOR_xfxx_simple, B_UID(812) }; +static const struct builtin B4_vec_vsr = { { &T_vec_b32, &T_vec_u16, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsr:4", "*vsr", CODE_FOR_xfxx_simple, B_UID(813) }; +static const struct builtin B5_vec_vsr = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsr:5", "*vsr", CODE_FOR_xfxx_simple, B_UID(814) }; +static const struct builtin B6_vec_vsr = { { &T_vec_b32, &T_vec_u8, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsr:6", "*vsr", CODE_FOR_xfxx_simple, B_UID(815) }; +static const struct builtin B7_vec_vsr = { { &T_vec_b8, &T_vec_u16, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsr:7", "*vsr", CODE_FOR_xfxx_simple, B_UID(816) }; +static const struct builtin B8_vec_vsr = { { &T_vec_b8, &T_vec_u32, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsr:8", "*vsr", CODE_FOR_xfxx_simple, B_UID(817) }; +static const struct builtin B9_vec_vsr = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsr:9", "*vsr", CODE_FOR_xfxx_simple, B_UID(818) }; +static const struct builtin B10_vec_vsr = { { &T_vec_p16, &T_vec_u16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsr:10", "*vsr", CODE_FOR_xfxx_simple, B_UID(819) }; +static const struct builtin B11_vec_vsr = { { &T_vec_p16, &T_vec_u32, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsr:11", "*vsr", CODE_FOR_xfxx_simple, B_UID(820) }; +static const struct builtin B12_vec_vsr = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsr:12", "*vsr", CODE_FOR_xfxx_simple, B_UID(821) }; +static const struct builtin B13_vec_vsr = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, 
"vec_vsr:13", "*vsr", CODE_FOR_xfxx_simple, B_UID(822) }; +static const struct builtin B14_vec_vsr = { { &T_vec_s16, &T_vec_u32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsr:14", "*vsr", CODE_FOR_xfxx_simple, B_UID(823) }; +static const struct builtin B15_vec_vsr = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsr:15", "*vsr", CODE_FOR_xfxx_simple, B_UID(824) }; +static const struct builtin B16_vec_vsr = { { &T_vec_s32, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsr:16", "*vsr", CODE_FOR_xfxx_simple, B_UID(825) }; +static const struct builtin B17_vec_vsr = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsr:17", "*vsr", CODE_FOR_xfxx_simple, B_UID(826) }; +static const struct builtin B18_vec_vsr = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsr:18", "*vsr", CODE_FOR_xfxx_simple, B_UID(827) }; +static const struct builtin B19_vec_vsr = { { &T_vec_s8, &T_vec_u16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsr:19", "*vsr", CODE_FOR_xfxx_simple, B_UID(828) }; +static const struct builtin B20_vec_vsr = { { &T_vec_s8, &T_vec_u32, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsr:20", "*vsr", CODE_FOR_xfxx_simple, B_UID(829) }; +static const struct builtin B21_vec_vsr = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsr:21", "*vsr", CODE_FOR_xfxx_simple, B_UID(830) }; +static const struct builtin B22_vec_vsr = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsr:22", "*vsr", CODE_FOR_xfxx_simple, B_UID(831) }; +static const struct builtin B23_vec_vsr = { { &T_vec_u16, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsr:23", "*vsr", CODE_FOR_xfxx_simple, B_UID(832) }; +static const struct builtin B24_vec_vsr = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsr:24", "*vsr", CODE_FOR_xfxx_simple, B_UID(833) }; +static const 
struct builtin B25_vec_vsr = { { &T_vec_u32, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsr:25", "*vsr", CODE_FOR_xfxx_simple, B_UID(834) }; +static const struct builtin B26_vec_vsr = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsr:26", "*vsr", CODE_FOR_xfxx_simple, B_UID(835) }; +static const struct builtin B27_vec_vsr = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsr:27", "*vsr", CODE_FOR_xfxx_simple, B_UID(836) }; +static const struct builtin B28_vec_vsr = { { &T_vec_u8, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsr:28", "*vsr", CODE_FOR_xfxx_simple, B_UID(837) }; +static const struct builtin B29_vec_vsr = { { &T_vec_u8, &T_vec_u32, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsr:29", "*vsr", CODE_FOR_xfxx_simple, B_UID(838) }; +static const struct builtin B30_vec_vsr = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsr:30", "*vsr", CODE_FOR_xfxx_simple, B_UID(839) }; +static const struct builtin B1_vec_vsro = { { &T_vec_f32, &T_vec_s8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vsro:1", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(840) }; +static const struct builtin B2_vec_vsro = { { &T_vec_f32, &T_vec_u8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vsro:2", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(841) }; +static const struct builtin B3_vec_vsro = { { &T_vec_p16, &T_vec_s8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsro:3", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(842) }; +static const struct builtin B4_vec_vsro = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsro:4", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(843) }; +static const struct builtin B5_vec_vsro = { { &T_vec_s16, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsro:5", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(844) }; +static const struct builtin B6_vec_vsro = { { &T_vec_s16, &T_vec_u8, NULL, 
}, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsro:6", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(845) }; +static const struct builtin B7_vec_vsro = { { &T_vec_s32, &T_vec_s8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsro:7", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(846) }; +static const struct builtin B8_vec_vsro = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsro:8", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(847) }; +static const struct builtin B9_vec_vsro = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsro:9", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(848) }; +static const struct builtin B10_vec_vsro = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsro:10", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(849) }; +static const struct builtin B11_vec_vsro = { { &T_vec_u16, &T_vec_s8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsro:11", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(850) }; +static const struct builtin B12_vec_vsro = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsro:12", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(851) }; +static const struct builtin B13_vec_vsro = { { &T_vec_u32, &T_vec_s8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsro:13", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(852) }; +static const struct builtin B14_vec_vsro = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsro:14", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(853) }; +static const struct builtin B15_vec_vsro = { { &T_vec_u8, &T_vec_s8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsro:15", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(854) }; +static const struct builtin B16_vec_vsro = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsro:16", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(855) }; +static const struct builtin B1_vec_stvx = { { &T_vec_b16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, 
FALSE, 0, "vec_stvx:1", "*stvx", CODE_FOR_sfxii_store, B_UID(856) }; +static const struct builtin B2_vec_stvx = { { &T_vec_b16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:2", "*stvx", CODE_FOR_sfxii_store, B_UID(857) }; +static const struct builtin B3_vec_stvx = { { &T_vec_b16, &T_int, &T_vec_b16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:3", "*stvx", CODE_FOR_sfxii_store, B_UID(858) }; +static const struct builtin B4_vec_stvx = { { &T_vec_b32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:4", "*stvx", CODE_FOR_sfxii_store, B_UID(859) }; +static const struct builtin B5_vec_stvx = { { &T_vec_b32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:5", "*stvx", CODE_FOR_sfxii_store, B_UID(860) }; +static const struct builtin B6_vec_stvx = { { &T_vec_b32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:6", "*stvx", CODE_FOR_sfxii_store, B_UID(861) }; +static const struct builtin B7_vec_stvx = { { &T_vec_b32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:7", "*stvx", CODE_FOR_sfxii_store, B_UID(862) }; +static const struct builtin B8_vec_stvx = { { &T_vec_b32, &T_int, &T_vec_b32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:8", "*stvx", CODE_FOR_sfxii_store, B_UID(863) }; +static const struct builtin B9_vec_stvx = { { &T_vec_b8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:9", "*stvx", CODE_FOR_sfxii_store, B_UID(864) }; +static const struct builtin B10_vec_stvx = { { &T_vec_b8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:10", "*stvx", CODE_FOR_sfxii_store, B_UID(865) }; +static const struct builtin B11_vec_stvx = { { &T_vec_b8, &T_int, &T_vec_b8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:11", "*stvx", CODE_FOR_sfxii_store, B_UID(866) }; +static const struct builtin B12_vec_stvx = { { &T_vec_f32, &T_int, &T_float_ptr, }, "xii", 
&T_void, 3, FALSE, FALSE, 0, "vec_stvx:12", "*stvx", CODE_FOR_sfxii_store, B_UID(867) }; +static const struct builtin B13_vec_stvx = { { &T_vec_f32, &T_int, &T_vec_f32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:13", "*stvx", CODE_FOR_sfxii_store, B_UID(868) }; +static const struct builtin B14_vec_stvx = { { &T_vec_p16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:14", "*stvx", CODE_FOR_sfxii_store, B_UID(869) }; +static const struct builtin B15_vec_stvx = { { &T_vec_p16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:15", "*stvx", CODE_FOR_sfxii_store, B_UID(870) }; +static const struct builtin B16_vec_stvx = { { &T_vec_p16, &T_int, &T_vec_p16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:16", "*stvx", CODE_FOR_sfxii_store, B_UID(871) }; +static const struct builtin B17_vec_stvx = { { &T_vec_s16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:17", "*stvx", CODE_FOR_sfxii_store, B_UID(872) }; +static const struct builtin B18_vec_stvx = { { &T_vec_s16, &T_int, &T_vec_s16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:18", "*stvx", CODE_FOR_sfxii_store, B_UID(873) }; +static const struct builtin B19_vec_stvx = { { &T_vec_s32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:19", "*stvx", CODE_FOR_sfxii_store, B_UID(874) }; +static const struct builtin B20_vec_stvx = { { &T_vec_s32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:20", "*stvx", CODE_FOR_sfxii_store, B_UID(875) }; +static const struct builtin B21_vec_stvx = { { &T_vec_s32, &T_int, &T_vec_s32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:21", "*stvx", CODE_FOR_sfxii_store, B_UID(876) }; +static const struct builtin B22_vec_stvx = { { &T_vec_s8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:22", "*stvx", CODE_FOR_sfxii_store, B_UID(877) }; +static const struct builtin B23_vec_stvx = { { &T_vec_s8, &T_int, &T_vec_s8_ptr, 
}, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:23", "*stvx", CODE_FOR_sfxii_store, B_UID(878) }; +static const struct builtin B24_vec_stvx = { { &T_vec_u16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:24", "*stvx", CODE_FOR_sfxii_store, B_UID(879) }; +static const struct builtin B25_vec_stvx = { { &T_vec_u16, &T_int, &T_vec_u16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:25", "*stvx", CODE_FOR_sfxii_store, B_UID(880) }; +static const struct builtin B26_vec_stvx = { { &T_vec_u32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:26", "*stvx", CODE_FOR_sfxii_store, B_UID(881) }; +static const struct builtin B27_vec_stvx = { { &T_vec_u32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:27", "*stvx", CODE_FOR_sfxii_store, B_UID(882) }; +static const struct builtin B28_vec_stvx = { { &T_vec_u32, &T_int, &T_vec_u32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:28", "*stvx", CODE_FOR_sfxii_store, B_UID(883) }; +static const struct builtin B29_vec_stvx = { { &T_vec_u8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:29", "*stvx", CODE_FOR_sfxii_store, B_UID(884) }; +static const struct builtin B30_vec_stvx = { { &T_vec_u8, &T_int, &T_vec_u8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:30", "*stvx", CODE_FOR_sfxii_store, B_UID(885) }; +static const struct builtin B1_vec_stvebx = { { &T_vec_b16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:1", "*stvebx", CODE_FOR_sfxii_store, B_UID(886) }; +static const struct builtin B2_vec_stvebx = { { &T_vec_b16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:2", "*stvebx", CODE_FOR_sfxii_store, B_UID(887) }; +static const struct builtin B1_vec_stvewx = { { &T_vec_b32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:1", "*stvewx", CODE_FOR_sfxii_store, B_UID(888) }; +static const struct builtin 
B2_vec_stvewx = { { &T_vec_b32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:2", "*stvewx", CODE_FOR_sfxii_store, B_UID(889) }; +static const struct builtin B3_vec_stvewx = { { &T_vec_b32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:3", "*stvewx", CODE_FOR_sfxii_store, B_UID(890) }; +static const struct builtin B4_vec_stvewx = { { &T_vec_b32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:4", "*stvewx", CODE_FOR_sfxii_store, B_UID(891) }; +static const struct builtin B3_vec_stvebx = { { &T_vec_b8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:3", "*stvebx", CODE_FOR_sfxii_store, B_UID(892) }; +static const struct builtin B4_vec_stvebx = { { &T_vec_b8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:4", "*stvebx", CODE_FOR_sfxii_store, B_UID(893) }; +static const struct builtin B5_vec_stvewx = { { &T_vec_f32, &T_int, &T_float_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:5", "*stvewx", CODE_FOR_sfxii_store, B_UID(894) }; +static const struct builtin B1_vec_stvehx = { { &T_vec_p16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:1", "*stvehx", CODE_FOR_sfxii_store, B_UID(895) }; +static const struct builtin B2_vec_stvehx = { { &T_vec_p16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:2", "*stvehx", CODE_FOR_sfxii_store, B_UID(896) }; +static const struct builtin B3_vec_stvehx = { { &T_vec_s16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:3", "*stvehx", CODE_FOR_sfxii_store, B_UID(897) }; +static const struct builtin B6_vec_stvewx = { { &T_vec_s32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:6", "*stvewx", CODE_FOR_sfxii_store, B_UID(898) }; +static const struct builtin B7_vec_stvewx = { { &T_vec_s32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:7", 
"*stvewx", CODE_FOR_sfxii_store, B_UID(899) }; +static const struct builtin B5_vec_stvebx = { { &T_vec_s8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:5", "*stvebx", CODE_FOR_sfxii_store, B_UID(900) }; +static const struct builtin B4_vec_stvehx = { { &T_vec_u16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:4", "*stvehx", CODE_FOR_sfxii_store, B_UID(901) }; +static const struct builtin B8_vec_stvewx = { { &T_vec_u32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:8", "*stvewx", CODE_FOR_sfxii_store, B_UID(902) }; +static const struct builtin B9_vec_stvewx = { { &T_vec_u32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:9", "*stvewx", CODE_FOR_sfxii_store, B_UID(903) }; +static const struct builtin B6_vec_stvebx = { { &T_vec_u8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:6", "*stvebx", CODE_FOR_sfxii_store, B_UID(904) }; +static const struct builtin B1_vec_stvxl = { { &T_vec_b16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:1", "*stvxl", CODE_FOR_sfxii_store, B_UID(905) }; +static const struct builtin B2_vec_stvxl = { { &T_vec_b16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:2", "*stvxl", CODE_FOR_sfxii_store, B_UID(906) }; +static const struct builtin B3_vec_stvxl = { { &T_vec_b16, &T_int, &T_vec_b16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:3", "*stvxl", CODE_FOR_sfxii_store, B_UID(907) }; +static const struct builtin B4_vec_stvxl = { { &T_vec_b32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:4", "*stvxl", CODE_FOR_sfxii_store, B_UID(908) }; +static const struct builtin B5_vec_stvxl = { { &T_vec_b32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:5", "*stvxl", CODE_FOR_sfxii_store, B_UID(909) }; +static const struct builtin B6_vec_stvxl = { { &T_vec_b32, &T_int, 
&T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:6", "*stvxl", CODE_FOR_sfxii_store, B_UID(910) }; +static const struct builtin B7_vec_stvxl = { { &T_vec_b32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:7", "*stvxl", CODE_FOR_sfxii_store, B_UID(911) }; +static const struct builtin B8_vec_stvxl = { { &T_vec_b32, &T_int, &T_vec_b32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:8", "*stvxl", CODE_FOR_sfxii_store, B_UID(912) }; +static const struct builtin B9_vec_stvxl = { { &T_vec_b8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:9", "*stvxl", CODE_FOR_sfxii_store, B_UID(913) }; +static const struct builtin B10_vec_stvxl = { { &T_vec_b8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:10", "*stvxl", CODE_FOR_sfxii_store, B_UID(914) }; +static const struct builtin B11_vec_stvxl = { { &T_vec_b8, &T_int, &T_vec_b8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:11", "*stvxl", CODE_FOR_sfxii_store, B_UID(915) }; +static const struct builtin B12_vec_stvxl = { { &T_vec_f32, &T_int, &T_float_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:12", "*stvxl", CODE_FOR_sfxii_store, B_UID(916) }; +static const struct builtin B13_vec_stvxl = { { &T_vec_f32, &T_int, &T_vec_f32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:13", "*stvxl", CODE_FOR_sfxii_store, B_UID(917) }; +static const struct builtin B14_vec_stvxl = { { &T_vec_p16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:14", "*stvxl", CODE_FOR_sfxii_store, B_UID(918) }; +static const struct builtin B15_vec_stvxl = { { &T_vec_p16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:15", "*stvxl", CODE_FOR_sfxii_store, B_UID(919) }; +static const struct builtin B16_vec_stvxl = { { &T_vec_p16, &T_int, &T_vec_p16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:16", "*stvxl", CODE_FOR_sfxii_store, B_UID(920) }; +static 
const struct builtin B17_vec_stvxl = { { &T_vec_s16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:17", "*stvxl", CODE_FOR_sfxii_store, B_UID(921) }; +static const struct builtin B18_vec_stvxl = { { &T_vec_s16, &T_int, &T_vec_s16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:18", "*stvxl", CODE_FOR_sfxii_store, B_UID(922) }; +static const struct builtin B19_vec_stvxl = { { &T_vec_s32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:19", "*stvxl", CODE_FOR_sfxii_store, B_UID(923) }; +static const struct builtin B20_vec_stvxl = { { &T_vec_s32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:20", "*stvxl", CODE_FOR_sfxii_store, B_UID(924) }; +static const struct builtin B21_vec_stvxl = { { &T_vec_s32, &T_int, &T_vec_s32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:21", "*stvxl", CODE_FOR_sfxii_store, B_UID(925) }; +static const struct builtin B22_vec_stvxl = { { &T_vec_s8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:22", "*stvxl", CODE_FOR_sfxii_store, B_UID(926) }; +static const struct builtin B23_vec_stvxl = { { &T_vec_s8, &T_int, &T_vec_s8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:23", "*stvxl", CODE_FOR_sfxii_store, B_UID(927) }; +static const struct builtin B24_vec_stvxl = { { &T_vec_u16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:24", "*stvxl", CODE_FOR_sfxii_store, B_UID(928) }; +static const struct builtin B25_vec_stvxl = { { &T_vec_u16, &T_int, &T_vec_u16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:25", "*stvxl", CODE_FOR_sfxii_store, B_UID(929) }; +static const struct builtin B26_vec_stvxl = { { &T_vec_u32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:26", "*stvxl", CODE_FOR_sfxii_store, B_UID(930) }; +static const struct builtin B27_vec_stvxl = { { &T_vec_u32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, 
"vec_stvxl:27", "*stvxl", CODE_FOR_sfxii_store, B_UID(931) }; +static const struct builtin B28_vec_stvxl = { { &T_vec_u32, &T_int, &T_vec_u32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:28", "*stvxl", CODE_FOR_sfxii_store, B_UID(932) }; +static const struct builtin B29_vec_stvxl = { { &T_vec_u8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:29", "*stvxl", CODE_FOR_sfxii_store, B_UID(933) }; +static const struct builtin B30_vec_stvxl = { { &T_vec_u8, &T_int, &T_vec_u8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:30", "*stvxl", CODE_FOR_sfxii_store, B_UID(934) }; +static const struct builtin B1_vec_vsubuhm = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubuhm:1", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(935) }; +static const struct builtin B2_vec_vsubuhm = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhm:2", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(936) }; +static const struct builtin B1_vec_vsubuwm = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubuwm:1", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(937) }; +static const struct builtin B2_vec_vsubuwm = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuwm:2", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(938) }; +static const struct builtin B1_vec_vsububm = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsububm:1", "*vsububm", CODE_FOR_xfxx_simple, B_UID(939) }; +static const struct builtin B2_vec_vsububm = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububm:2", "*vsububm", CODE_FOR_xfxx_simple, B_UID(940) }; +static const struct builtin B_vec_vsubfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vsubfp", "*vsubfp", CODE_FOR_xfxx_fp, B_UID(941) }; +static const struct builtin B3_vec_vsubuhm = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", 
&T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubuhm:3", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(942) }; +static const struct builtin B4_vec_vsubuhm = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubuhm:4", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(943) }; +static const struct builtin B3_vec_vsubuwm = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubuwm:3", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(944) }; +static const struct builtin B4_vec_vsubuwm = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubuwm:4", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(945) }; +static const struct builtin B3_vec_vsububm = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsububm:3", "*vsububm", CODE_FOR_xfxx_simple, B_UID(946) }; +static const struct builtin B4_vec_vsububm = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsububm:4", "*vsububm", CODE_FOR_xfxx_simple, B_UID(947) }; +static const struct builtin B5_vec_vsubuhm = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhm:5", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(948) }; +static const struct builtin B6_vec_vsubuhm = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhm:6", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(949) }; +static const struct builtin B5_vec_vsubuwm = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuwm:5", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(950) }; +static const struct builtin B6_vec_vsubuwm = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuwm:6", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(951) }; +static const struct builtin B5_vec_vsububm = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububm:5", "*vsububm", CODE_FOR_xfxx_simple, B_UID(952) }; +static const struct builtin B6_vec_vsububm = { { 
&T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububm:6", "*vsububm", CODE_FOR_xfxx_simple, B_UID(953) }; +static const struct builtin B_vec_vsubcuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsubcuw", "*vsubcuw", CODE_FOR_xfxx_simple, B_UID(954) }; +static const struct builtin B1_vec_vsubshs = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubshs:1", "*vsubshs", CODE_FOR_xfxx_simple, B_UID(955) }; +static const struct builtin B1_vec_vsubuhs = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhs:1", "*vsubuhs", CODE_FOR_xfxx_simple, B_UID(956) }; +static const struct builtin B1_vec_vsubsws = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubsws:1", "*vsubsws", CODE_FOR_xfxx_simple, B_UID(957) }; +static const struct builtin B1_vec_vsubuws = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuws:1", "*vsubuws", CODE_FOR_xfxx_simple, B_UID(958) }; +static const struct builtin B1_vec_vsubsbs = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsubsbs:1", "*vsubsbs", CODE_FOR_xfxx_simple, B_UID(959) }; +static const struct builtin B1_vec_vsububs = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububs:1", "*vsububs", CODE_FOR_xfxx_simple, B_UID(960) }; +static const struct builtin B2_vec_vsubshs = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubshs:2", "*vsubshs", CODE_FOR_xfxx_simple, B_UID(961) }; +static const struct builtin B3_vec_vsubshs = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubshs:3", "*vsubshs", CODE_FOR_xfxx_simple, B_UID(962) }; +static const struct builtin B2_vec_vsubsws = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubsws:2", "*vsubsws", CODE_FOR_xfxx_simple, B_UID(963) }; +static const 
struct builtin B3_vec_vsubsws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubsws:3", "*vsubsws", CODE_FOR_xfxx_simple, B_UID(964) }; +static const struct builtin B2_vec_vsubsbs = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsubsbs:2", "*vsubsbs", CODE_FOR_xfxx_simple, B_UID(965) }; +static const struct builtin B3_vec_vsubsbs = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsubsbs:3", "*vsubsbs", CODE_FOR_xfxx_simple, B_UID(966) }; +static const struct builtin B2_vec_vsubuhs = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhs:2", "*vsubuhs", CODE_FOR_xfxx_simple, B_UID(967) }; +static const struct builtin B3_vec_vsubuhs = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhs:3", "*vsubuhs", CODE_FOR_xfxx_simple, B_UID(968) }; +static const struct builtin B2_vec_vsubuws = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuws:2", "*vsubuws", CODE_FOR_xfxx_simple, B_UID(969) }; +static const struct builtin B3_vec_vsubuws = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuws:3", "*vsubuws", CODE_FOR_xfxx_simple, B_UID(970) }; +static const struct builtin B2_vec_vsububs = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububs:2", "*vsububs", CODE_FOR_xfxx_simple, B_UID(971) }; +static const struct builtin B3_vec_vsububs = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububs:3", "*vsububs", CODE_FOR_xfxx_simple, B_UID(972) }; +static const struct builtin B_vec_vsum2sws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsum2sws", "*vsum2sws", CODE_FOR_xfxx_complex, B_UID(973) }; +static const struct builtin B_vec_vsum4shs = { { &T_vec_s16, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsum4shs", "*vsum4shs", 
CODE_FOR_xfxx_complex, B_UID(974) }; +static const struct builtin B_vec_vsum4sbs = { { &T_vec_s8, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsum4sbs", "*vsum4sbs", CODE_FOR_xfxx_complex, B_UID(975) }; +static const struct builtin B_vec_vsum4ubs = { { &T_vec_u8, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsum4ubs", "*vsum4ubs", CODE_FOR_xfxx_complex, B_UID(976) }; +static const struct builtin B_vec_vsumsws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsumsws", "*vsumsws", CODE_FOR_xfxx_complex, B_UID(977) }; +static const struct builtin B_vec_vrfiz = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfiz", "*vrfiz", CODE_FOR_xfx_fp, B_UID(978) }; +static const struct builtin B1_vec_unpack2sh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_unpack2sh:1", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(979) }; +static const struct builtin B2_vec_unpack2sh = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_unpack2sh:2", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(980) }; +static const struct builtin B1_vec_unpack2sl = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_unpack2sl:1", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(981) }; +static const struct builtin B2_vec_unpack2sl = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_unpack2sl:2", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(982) }; +static const struct builtin B1_vec_unpack2uh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_unpack2uh:1", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(983) }; +static const struct builtin B2_vec_unpack2uh = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_unpack2uh:2", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(984) }; +static const struct builtin B1_vec_unpack2ul = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, 
"vec_unpack2ul:1", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(985) }; +static const struct builtin B2_vec_unpack2ul = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_unpack2ul:2", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(986) }; +static const struct builtin B1_vec_vupkhsh = { { &T_vec_b16, NULL, NULL, }, "x", &T_vec_b32, 1, FALSE, FALSE, 0, "vec_vupkhsh:1", "*vupkhsh", CODE_FOR_xfx_perm, B_UID(987) }; +static const struct builtin B1_vec_vupkhsb = { { &T_vec_b8, NULL, NULL, }, "x", &T_vec_b16, 1, FALSE, FALSE, 0, "vec_vupkhsb:1", "*vupkhsb", CODE_FOR_xfx_perm, B_UID(988) }; +static const struct builtin B_vec_vupkhpx = { { &T_vec_p16, NULL, NULL, }, "x", &T_vec_u32, 1, FALSE, FALSE, 0, "vec_vupkhpx", "*vupkhpx", CODE_FOR_xfx_perm, B_UID(989) }; +static const struct builtin B2_vec_vupkhsh = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 0, "vec_vupkhsh:2", "*vupkhsh", CODE_FOR_xfx_perm, B_UID(990) }; +static const struct builtin B2_vec_vupkhsb = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 0, "vec_vupkhsb:2", "*vupkhsb", CODE_FOR_xfx_perm, B_UID(991) }; +static const struct builtin B1_vec_vupklsh = { { &T_vec_b16, NULL, NULL, }, "x", &T_vec_b32, 1, FALSE, FALSE, 0, "vec_vupklsh:1", "*vupklsh", CODE_FOR_xfx_perm, B_UID(992) }; +static const struct builtin B1_vec_vupklsb = { { &T_vec_b8, NULL, NULL, }, "x", &T_vec_b16, 1, FALSE, FALSE, 0, "vec_vupklsb:1", "*vupklsb", CODE_FOR_xfx_perm, B_UID(993) }; +static const struct builtin B_vec_vupklpx = { { &T_vec_p16, NULL, NULL, }, "x", &T_vec_u32, 1, FALSE, FALSE, 0, "vec_vupklpx", "*vupklpx", CODE_FOR_xfx_perm, B_UID(994) }; +static const struct builtin B2_vec_vupklsh = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 0, "vec_vupklsh:2", "*vupklsh", CODE_FOR_xfx_perm, B_UID(995) }; +static const struct builtin B2_vec_vupklsb = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 0, "vec_vupklsb:2", "*vupklsb", CODE_FOR_xfx_perm, B_UID(996) }; 
+static const struct builtin B1_vec_vxor = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 1, "vec_vxor:1", "*vxor", CODE_FOR_xfxx_simple, B_UID(997) }; +static const struct builtin B2_vec_vxor = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vxor:2", "*vxor", CODE_FOR_xfxx_simple, B_UID(998) }; +static const struct builtin B3_vec_vxor = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vxor:3", "*vxor", CODE_FOR_xfxx_simple, B_UID(999) }; +static const struct builtin B4_vec_vxor = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 1, "vec_vxor:4", "*vxor", CODE_FOR_xfxx_simple, B_UID(1000) }; +static const struct builtin B5_vec_vxor = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vxor:5", "*vxor", CODE_FOR_xfxx_simple, B_UID(1001) }; +static const struct builtin B6_vec_vxor = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vxor:6", "*vxor", CODE_FOR_xfxx_simple, B_UID(1002) }; +static const struct builtin B7_vec_vxor = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vxor:7", "*vxor", CODE_FOR_xfxx_simple, B_UID(1003) }; +static const struct builtin B8_vec_vxor = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 1, "vec_vxor:8", "*vxor", CODE_FOR_xfxx_simple, B_UID(1004) }; +static const struct builtin B9_vec_vxor = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vxor:9", "*vxor", CODE_FOR_xfxx_simple, B_UID(1005) }; +static const struct builtin B10_vec_vxor = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vxor:10", "*vxor", CODE_FOR_xfxx_simple, B_UID(1006) }; +static const struct builtin B11_vec_vxor = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vxor:11", "*vxor", CODE_FOR_xfxx_simple, B_UID(1007) }; +static const struct builtin B12_vec_vxor = { { 
&T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vxor:12", "*vxor", CODE_FOR_xfxx_simple, B_UID(1008) }; +static const struct builtin B13_vec_vxor = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vxor:13", "*vxor", CODE_FOR_xfxx_simple, B_UID(1009) }; +static const struct builtin B14_vec_vxor = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vxor:14", "*vxor", CODE_FOR_xfxx_simple, B_UID(1010) }; +static const struct builtin B15_vec_vxor = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vxor:15", "*vxor", CODE_FOR_xfxx_simple, B_UID(1011) }; +static const struct builtin B16_vec_vxor = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vxor:16", "*vxor", CODE_FOR_xfxx_simple, B_UID(1012) }; +static const struct builtin B17_vec_vxor = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vxor:17", "*vxor", CODE_FOR_xfxx_simple, B_UID(1013) }; +static const struct builtin B18_vec_vxor = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vxor:18", "*vxor", CODE_FOR_xfxx_simple, B_UID(1014) }; +static const struct builtin B19_vec_vxor = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vxor:19", "*vxor", CODE_FOR_xfxx_simple, B_UID(1015) }; +static const struct builtin B20_vec_vxor = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vxor:20", "*vxor", CODE_FOR_xfxx_simple, B_UID(1016) }; +static const struct builtin B21_vec_vxor = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vxor:21", "*vxor", CODE_FOR_xfxx_simple, B_UID(1017) }; +static const struct builtin B22_vec_vxor = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vxor:22", "*vxor", CODE_FOR_xfxx_simple, B_UID(1018) }; +static const struct builtin B23_vec_vxor = { { &T_vec_u8, &T_vec_b8, 
NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vxor:23", "*vxor", CODE_FOR_xfxx_simple, B_UID(1019) }; +static const struct builtin B24_vec_vxor = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vxor:24", "*vxor", CODE_FOR_xfxx_simple, B_UID(1020) }; +#define LAST_B_UID B_UID(1021) + +const struct builtin * const Builtin[] = { + &B1_vec_abs, + &B2_vec_abs, + &B3_vec_abs, + &B4_vec_abs, + &B1_vec_abss, + &B2_vec_abss, + &B3_vec_abss, + &B1_vec_vadduhm, + &B2_vec_vadduhm, + &B1_vec_vadduwm, + &B2_vec_vadduwm, + &B1_vec_vaddubm, + &B2_vec_vaddubm, + &B_vec_vaddfp, + &B3_vec_vadduhm, + &B4_vec_vadduhm, + &B3_vec_vadduwm, + &B4_vec_vadduwm, + &B3_vec_vaddubm, + &B4_vec_vaddubm, + &B5_vec_vadduhm, + &B6_vec_vadduhm, + &B5_vec_vadduwm, + &B6_vec_vadduwm, + &B5_vec_vaddubm, + &B6_vec_vaddubm, + &B_vec_vaddcuw, + &B1_vec_vaddshs, + &B1_vec_vadduhs, + &B1_vec_vaddsws, + &B1_vec_vadduws, + &B1_vec_vaddsbs, + &B1_vec_vaddubs, + &B2_vec_vaddshs, + &B3_vec_vaddshs, + &B2_vec_vaddsws, + &B3_vec_vaddsws, + &B2_vec_vaddsbs, + &B3_vec_vaddsbs, + &B2_vec_vadduhs, + &B3_vec_vadduhs, + &B2_vec_vadduws, + &B3_vec_vadduws, + &B2_vec_vaddubs, + &B3_vec_vaddubs, + &B1_vec_all_eq, + &B2_vec_all_eq, + &B3_vec_all_eq, + &B4_vec_all_eq, + &B5_vec_all_eq, + &B6_vec_all_eq, + &B7_vec_all_eq, + &B8_vec_all_eq, + &B9_vec_all_eq, + &B10_vec_all_eq, + &B11_vec_all_eq, + &B12_vec_all_eq, + &B13_vec_all_eq, + &B14_vec_all_eq, + &B15_vec_all_eq, + &B16_vec_all_eq, + &B17_vec_all_eq, + &B18_vec_all_eq, + &B19_vec_all_eq, + &B20_vec_all_eq, + &B21_vec_all_eq, + &B22_vec_all_eq, + &B23_vec_all_eq, + &B1_vec_all_ge, + &B2_vec_all_ge, + &B3_vec_all_ge, + &B4_vec_all_ge, + &B5_vec_all_ge, + &B6_vec_all_ge, + &B7_vec_all_ge, + &B8_vec_all_ge, + &B9_vec_all_ge, + &B10_vec_all_ge, + &B11_vec_all_ge, + &B12_vec_all_ge, + &B13_vec_all_ge, + &B14_vec_all_ge, + &B15_vec_all_ge, + &B16_vec_all_ge, + &B17_vec_all_ge, + &B18_vec_all_ge, + &B19_vec_all_ge, + &B1_vec_all_gt, + 
&B2_vec_all_gt, + &B3_vec_all_gt, + &B4_vec_all_gt, + &B5_vec_all_gt, + &B6_vec_all_gt, + &B7_vec_all_gt, + &B8_vec_all_gt, + &B9_vec_all_gt, + &B10_vec_all_gt, + &B11_vec_all_gt, + &B12_vec_all_gt, + &B13_vec_all_gt, + &B14_vec_all_gt, + &B15_vec_all_gt, + &B16_vec_all_gt, + &B17_vec_all_gt, + &B18_vec_all_gt, + &B19_vec_all_gt, + &B_vec_all_in, + &B1_vec_all_le, + &B2_vec_all_le, + &B3_vec_all_le, + &B4_vec_all_le, + &B5_vec_all_le, + &B6_vec_all_le, + &B7_vec_all_le, + &B8_vec_all_le, + &B9_vec_all_le, + &B10_vec_all_le, + &B11_vec_all_le, + &B12_vec_all_le, + &B13_vec_all_le, + &B14_vec_all_le, + &B15_vec_all_le, + &B16_vec_all_le, + &B17_vec_all_le, + &B18_vec_all_le, + &B19_vec_all_le, + &B1_vec_all_lt, + &B2_vec_all_lt, + &B3_vec_all_lt, + &B4_vec_all_lt, + &B5_vec_all_lt, + &B6_vec_all_lt, + &B7_vec_all_lt, + &B8_vec_all_lt, + &B9_vec_all_lt, + &B10_vec_all_lt, + &B11_vec_all_lt, + &B12_vec_all_lt, + &B13_vec_all_lt, + &B14_vec_all_lt, + &B15_vec_all_lt, + &B16_vec_all_lt, + &B17_vec_all_lt, + &B18_vec_all_lt, + &B19_vec_all_lt, + &B_vec_all_nan, + &B1_vec_all_ne, + &B2_vec_all_ne, + &B3_vec_all_ne, + &B4_vec_all_ne, + &B5_vec_all_ne, + &B6_vec_all_ne, + &B7_vec_all_ne, + &B8_vec_all_ne, + &B9_vec_all_ne, + &B10_vec_all_ne, + &B11_vec_all_ne, + &B12_vec_all_ne, + &B13_vec_all_ne, + &B14_vec_all_ne, + &B15_vec_all_ne, + &B16_vec_all_ne, + &B17_vec_all_ne, + &B18_vec_all_ne, + &B19_vec_all_ne, + &B20_vec_all_ne, + &B21_vec_all_ne, + &B22_vec_all_ne, + &B23_vec_all_ne, + &B_vec_all_nge, + &B_vec_all_ngt, + &B_vec_all_nle, + &B_vec_all_nlt, + &B_vec_all_numeric, + &B1_vec_vand, + &B2_vec_vand, + &B3_vec_vand, + &B4_vec_vand, + &B5_vec_vand, + &B6_vec_vand, + &B7_vec_vand, + &B8_vec_vand, + &B9_vec_vand, + &B10_vec_vand, + &B11_vec_vand, + &B12_vec_vand, + &B13_vec_vand, + &B14_vec_vand, + &B15_vec_vand, + &B16_vec_vand, + &B17_vec_vand, + &B18_vec_vand, + &B19_vec_vand, + &B20_vec_vand, + &B21_vec_vand, + &B22_vec_vand, + &B23_vec_vand, + &B24_vec_vand, + 
&B1_vec_vandc, + &B2_vec_vandc, + &B3_vec_vandc, + &B4_vec_vandc, + &B5_vec_vandc, + &B6_vec_vandc, + &B7_vec_vandc, + &B8_vec_vandc, + &B9_vec_vandc, + &B10_vec_vandc, + &B11_vec_vandc, + &B12_vec_vandc, + &B13_vec_vandc, + &B14_vec_vandc, + &B15_vec_vandc, + &B16_vec_vandc, + &B17_vec_vandc, + &B18_vec_vandc, + &B19_vec_vandc, + &B20_vec_vandc, + &B21_vec_vandc, + &B22_vec_vandc, + &B23_vec_vandc, + &B24_vec_vandc, + &B1_vec_any_eq, + &B2_vec_any_eq, + &B3_vec_any_eq, + &B4_vec_any_eq, + &B5_vec_any_eq, + &B6_vec_any_eq, + &B7_vec_any_eq, + &B8_vec_any_eq, + &B9_vec_any_eq, + &B10_vec_any_eq, + &B11_vec_any_eq, + &B12_vec_any_eq, + &B13_vec_any_eq, + &B14_vec_any_eq, + &B15_vec_any_eq, + &B16_vec_any_eq, + &B17_vec_any_eq, + &B18_vec_any_eq, + &B19_vec_any_eq, + &B20_vec_any_eq, + &B21_vec_any_eq, + &B22_vec_any_eq, + &B23_vec_any_eq, + &B1_vec_any_ge, + &B2_vec_any_ge, + &B3_vec_any_ge, + &B4_vec_any_ge, + &B5_vec_any_ge, + &B6_vec_any_ge, + &B7_vec_any_ge, + &B8_vec_any_ge, + &B9_vec_any_ge, + &B10_vec_any_ge, + &B11_vec_any_ge, + &B12_vec_any_ge, + &B13_vec_any_ge, + &B14_vec_any_ge, + &B15_vec_any_ge, + &B16_vec_any_ge, + &B17_vec_any_ge, + &B18_vec_any_ge, + &B19_vec_any_ge, + &B1_vec_any_gt, + &B2_vec_any_gt, + &B3_vec_any_gt, + &B4_vec_any_gt, + &B5_vec_any_gt, + &B6_vec_any_gt, + &B7_vec_any_gt, + &B8_vec_any_gt, + &B9_vec_any_gt, + &B10_vec_any_gt, + &B11_vec_any_gt, + &B12_vec_any_gt, + &B13_vec_any_gt, + &B14_vec_any_gt, + &B15_vec_any_gt, + &B16_vec_any_gt, + &B17_vec_any_gt, + &B18_vec_any_gt, + &B19_vec_any_gt, + &B1_vec_any_le, + &B2_vec_any_le, + &B3_vec_any_le, + &B4_vec_any_le, + &B5_vec_any_le, + &B6_vec_any_le, + &B7_vec_any_le, + &B8_vec_any_le, + &B9_vec_any_le, + &B10_vec_any_le, + &B11_vec_any_le, + &B12_vec_any_le, + &B13_vec_any_le, + &B14_vec_any_le, + &B15_vec_any_le, + &B16_vec_any_le, + &B17_vec_any_le, + &B18_vec_any_le, + &B19_vec_any_le, + &B1_vec_any_lt, + &B2_vec_any_lt, + &B3_vec_any_lt, + &B4_vec_any_lt, + &B5_vec_any_lt, + 
&B6_vec_any_lt, + &B7_vec_any_lt, + &B8_vec_any_lt, + &B9_vec_any_lt, + &B10_vec_any_lt, + &B11_vec_any_lt, + &B12_vec_any_lt, + &B13_vec_any_lt, + &B14_vec_any_lt, + &B15_vec_any_lt, + &B16_vec_any_lt, + &B17_vec_any_lt, + &B18_vec_any_lt, + &B19_vec_any_lt, + &B_vec_any_nan, + &B1_vec_any_ne, + &B2_vec_any_ne, + &B3_vec_any_ne, + &B4_vec_any_ne, + &B5_vec_any_ne, + &B6_vec_any_ne, + &B7_vec_any_ne, + &B8_vec_any_ne, + &B9_vec_any_ne, + &B10_vec_any_ne, + &B11_vec_any_ne, + &B12_vec_any_ne, + &B13_vec_any_ne, + &B14_vec_any_ne, + &B15_vec_any_ne, + &B16_vec_any_ne, + &B17_vec_any_ne, + &B18_vec_any_ne, + &B19_vec_any_ne, + &B20_vec_any_ne, + &B21_vec_any_ne, + &B22_vec_any_ne, + &B23_vec_any_ne, + &B_vec_any_nge, + &B_vec_any_ngt, + &B_vec_any_nle, + &B_vec_any_nlt, + &B_vec_any_numeric, + &B_vec_any_out, + &B_vec_vavgsh, + &B_vec_vavgsw, + &B_vec_vavgsb, + &B_vec_vavguh, + &B_vec_vavguw, + &B_vec_vavgub, + &B_vec_vrfip, + &B_vec_vcmpbfp, + &B_vec_vcmpeqfp, + &B1_vec_vcmpequh, + &B1_vec_vcmpequw, + &B1_vec_vcmpequb, + &B2_vec_vcmpequh, + &B2_vec_vcmpequw, + &B2_vec_vcmpequb, + &B_vec_vcmpgefp, + &B_vec_vcmpgtfp, + &B_vec_vcmpgtsh, + &B_vec_vcmpgtsw, + &B_vec_vcmpgtsb, + &B_vec_vcmpgtuh, + &B_vec_vcmpgtuw, + &B_vec_vcmpgtub, + &B_vec_cmple, + &B1_vec_cmplt, + &B2_vec_cmplt, + &B3_vec_cmplt, + &B4_vec_cmplt, + &B5_vec_cmplt, + &B6_vec_cmplt, + &B7_vec_cmplt, + &B_vec_vcfsx, + &B_vec_vcfux, + &B_vec_vctsxs, + &B_vec_vctuxs, + &B_vec_dss, + &B_vec_dssall, + &B1_vec_dst, + &B2_vec_dst, + &B3_vec_dst, + &B4_vec_dst, + &B5_vec_dst, + &B6_vec_dst, + &B7_vec_dst, + &B8_vec_dst, + &B9_vec_dst, + &B10_vec_dst, + &B11_vec_dst, + &B12_vec_dst, + &B13_vec_dst, + &B14_vec_dst, + &B15_vec_dst, + &B16_vec_dst, + &B17_vec_dst, + &B18_vec_dst, + &B19_vec_dst, + &B20_vec_dst, + &B1_vec_dstst, + &B2_vec_dstst, + &B3_vec_dstst, + &B4_vec_dstst, + &B5_vec_dstst, + &B6_vec_dstst, + &B7_vec_dstst, + &B8_vec_dstst, + &B9_vec_dstst, + &B10_vec_dstst, + &B11_vec_dstst, + &B12_vec_dstst, + 
&B13_vec_dstst, + &B14_vec_dstst, + &B15_vec_dstst, + &B16_vec_dstst, + &B17_vec_dstst, + &B18_vec_dstst, + &B19_vec_dstst, + &B20_vec_dstst, + &B1_vec_dststt, + &B2_vec_dststt, + &B3_vec_dststt, + &B4_vec_dststt, + &B5_vec_dststt, + &B6_vec_dststt, + &B7_vec_dststt, + &B8_vec_dststt, + &B9_vec_dststt, + &B10_vec_dststt, + &B11_vec_dststt, + &B12_vec_dststt, + &B13_vec_dststt, + &B14_vec_dststt, + &B15_vec_dststt, + &B16_vec_dststt, + &B17_vec_dststt, + &B18_vec_dststt, + &B19_vec_dststt, + &B20_vec_dststt, + &B1_vec_dstt, + &B2_vec_dstt, + &B3_vec_dstt, + &B4_vec_dstt, + &B5_vec_dstt, + &B6_vec_dstt, + &B7_vec_dstt, + &B8_vec_dstt, + &B9_vec_dstt, + &B10_vec_dstt, + &B11_vec_dstt, + &B12_vec_dstt, + &B13_vec_dstt, + &B14_vec_dstt, + &B15_vec_dstt, + &B16_vec_dstt, + &B17_vec_dstt, + &B18_vec_dstt, + &B19_vec_dstt, + &B20_vec_dstt, + &B_vec_vexptefp, + &B_vec_vrfim, + &B1_vec_lvx, + &B2_vec_lvx, + &B3_vec_lvx, + &B4_vec_lvx, + &B5_vec_lvx, + &B6_vec_lvx, + &B7_vec_lvx, + &B8_vec_lvx, + &B9_vec_lvx, + &B10_vec_lvx, + &B11_vec_lvx, + &B12_vec_lvx, + &B13_vec_lvx, + &B14_vec_lvx, + &B15_vec_lvx, + &B16_vec_lvx, + &B17_vec_lvx, + &B18_vec_lvx, + &B19_vec_lvx, + &B20_vec_lvx, + &B1_vec_lvewx, + &B2_vec_lvewx, + &B3_vec_lvewx, + &B1_vec_lvehx, + &B1_vec_lvebx, + &B2_vec_lvebx, + &B4_vec_lvewx, + &B5_vec_lvewx, + &B2_vec_lvehx, + &B1_vec_lvxl, + &B2_vec_lvxl, + &B3_vec_lvxl, + &B4_vec_lvxl, + &B5_vec_lvxl, + &B6_vec_lvxl, + &B7_vec_lvxl, + &B8_vec_lvxl, + &B9_vec_lvxl, + &B10_vec_lvxl, + &B11_vec_lvxl, + &B12_vec_lvxl, + &B13_vec_lvxl, + &B14_vec_lvxl, + &B15_vec_lvxl, + &B16_vec_lvxl, + &B17_vec_lvxl, + &B18_vec_lvxl, + &B19_vec_lvxl, + &B20_vec_lvxl, + &B_vec_vlogefp, + &B1_vec_lvsl, + &B2_vec_lvsl, + &B3_vec_lvsl, + &B4_vec_lvsl, + &B5_vec_lvsl, + &B6_vec_lvsl, + &B7_vec_lvsl, + &B8_vec_lvsl, + &B9_vec_lvsl, + &B1_vec_lvsr, + &B2_vec_lvsr, + &B3_vec_lvsr, + &B4_vec_lvsr, + &B5_vec_lvsr, + &B6_vec_lvsr, + &B7_vec_lvsr, + &B8_vec_lvsr, + &B9_vec_lvsr, + &B_vec_vmaddfp, + 
&B_vec_vmhaddshs, + &B1_vec_vmaxsh, + &B1_vec_vmaxuh, + &B1_vec_vmaxsw, + &B1_vec_vmaxuw, + &B1_vec_vmaxsb, + &B1_vec_vmaxub, + &B_vec_vmaxfp, + &B2_vec_vmaxsh, + &B3_vec_vmaxsh, + &B2_vec_vmaxsw, + &B3_vec_vmaxsw, + &B2_vec_vmaxsb, + &B3_vec_vmaxsb, + &B2_vec_vmaxuh, + &B3_vec_vmaxuh, + &B2_vec_vmaxuw, + &B3_vec_vmaxuw, + &B2_vec_vmaxub, + &B3_vec_vmaxub, + &B1_vec_vmrghh, + &B1_vec_vmrghw, + &B1_vec_vmrghb, + &B2_vec_vmrghw, + &B2_vec_vmrghh, + &B3_vec_vmrghh, + &B3_vec_vmrghw, + &B2_vec_vmrghb, + &B4_vec_vmrghh, + &B4_vec_vmrghw, + &B3_vec_vmrghb, + &B1_vec_vmrglh, + &B1_vec_vmrglw, + &B1_vec_vmrglb, + &B2_vec_vmrglw, + &B2_vec_vmrglh, + &B3_vec_vmrglh, + &B3_vec_vmrglw, + &B2_vec_vmrglb, + &B4_vec_vmrglh, + &B4_vec_vmrglw, + &B3_vec_vmrglb, + &B_vec_mfvscr, + &B1_vec_vminsh, + &B1_vec_vminuh, + &B1_vec_vminsw, + &B1_vec_vminuw, + &B1_vec_vminsb, + &B1_vec_vminub, + &B_vec_vminfp, + &B2_vec_vminsh, + &B3_vec_vminsh, + &B2_vec_vminsw, + &B3_vec_vminsw, + &B2_vec_vminsb, + &B3_vec_vminsb, + &B2_vec_vminuh, + &B3_vec_vminuh, + &B2_vec_vminuw, + &B3_vec_vminuw, + &B2_vec_vminub, + &B3_vec_vminub, + &B1_vec_vmladduhm, + &B2_vec_vmladduhm, + &B3_vec_vmladduhm, + &B4_vec_vmladduhm, + &B_vec_vmhraddshs, + &B_vec_vmsumshm, + &B_vec_vmsummbm, + &B_vec_vmsumuhm, + &B_vec_vmsumubm, + &B_vec_vmsumshs, + &B_vec_vmsumuhs, + &B1_vec_mtvscr, + &B2_vec_mtvscr, + &B3_vec_mtvscr, + &B4_vec_mtvscr, + &B5_vec_mtvscr, + &B6_vec_mtvscr, + &B7_vec_mtvscr, + &B8_vec_mtvscr, + &B9_vec_mtvscr, + &B10_vec_mtvscr, + &B_vec_vmulesh, + &B_vec_vmulesb, + &B_vec_vmuleuh, + &B_vec_vmuleub, + &B_vec_vmulosh, + &B_vec_vmulosb, + &B_vec_vmulouh, + &B_vec_vmuloub, + &B_vec_vnmsubfp, + &B1_vec_vnor, + &B2_vec_vnor, + &B3_vec_vnor, + &B4_vec_vnor, + &B5_vec_vnor, + &B6_vec_vnor, + &B7_vec_vnor, + &B8_vec_vnor, + &B9_vec_vnor, + &B10_vec_vnor, + &B1_vec_vor, + &B2_vec_vor, + &B3_vec_vor, + &B4_vec_vor, + &B5_vec_vor, + &B6_vec_vor, + &B7_vec_vor, + &B8_vec_vor, + &B9_vec_vor, + &B10_vec_vor, + 
&B11_vec_vor, + &B12_vec_vor, + &B13_vec_vor, + &B14_vec_vor, + &B15_vec_vor, + &B16_vec_vor, + &B17_vec_vor, + &B18_vec_vor, + &B19_vec_vor, + &B20_vec_vor, + &B21_vec_vor, + &B22_vec_vor, + &B23_vec_vor, + &B24_vec_vor, + &B1_vec_vpkuhum, + &B1_vec_vpkuwum, + &B2_vec_vpkuhum, + &B2_vec_vpkuwum, + &B3_vec_vpkuhum, + &B3_vec_vpkuwum, + &B_vec_vpkpx, + &B_vec_vpkshss, + &B_vec_vpkswss, + &B_vec_vpkuhus, + &B_vec_vpkuwus, + &B_vec_vpkshus, + &B_vec_vpkswus, + &B1_vec_vperm, + &B2_vec_vperm, + &B3_vec_vperm, + &B4_vec_vperm, + &B5_vec_vperm, + &B6_vec_vperm, + &B7_vec_vperm, + &B8_vec_vperm, + &B9_vec_vperm, + &B10_vec_vperm, + &B11_vec_vperm, + &B_vec_vrefp, + &B1_vec_vrlh, + &B1_vec_vrlw, + &B1_vec_vrlb, + &B2_vec_vrlh, + &B2_vec_vrlw, + &B2_vec_vrlb, + &B_vec_vrfin, + &B_vec_vrsqrtefp, + &B1_vec_vsel, + &B2_vec_vsel, + &B3_vec_vsel, + &B4_vec_vsel, + &B5_vec_vsel, + &B6_vec_vsel, + &B7_vec_vsel, + &B8_vec_vsel, + &B9_vec_vsel, + &B10_vec_vsel, + &B11_vec_vsel, + &B12_vec_vsel, + &B13_vec_vsel, + &B14_vec_vsel, + &B15_vec_vsel, + &B16_vec_vsel, + &B17_vec_vsel, + &B18_vec_vsel, + &B19_vec_vsel, + &B20_vec_vsel, + &B1_vec_vslh, + &B1_vec_vslw, + &B1_vec_vslb, + &B2_vec_vslh, + &B2_vec_vslw, + &B2_vec_vslb, + &B1_vec_vsldoi, + &B2_vec_vsldoi, + &B3_vec_vsldoi, + &B4_vec_vsldoi, + &B5_vec_vsldoi, + &B6_vec_vsldoi, + &B7_vec_vsldoi, + &B8_vec_vsldoi, + &B9_vec_vsldoi, + &B10_vec_vsldoi, + &B11_vec_vsldoi, + &B1_vec_vsl, + &B2_vec_vsl, + &B3_vec_vsl, + &B4_vec_vsl, + &B5_vec_vsl, + &B6_vec_vsl, + &B7_vec_vsl, + &B8_vec_vsl, + &B9_vec_vsl, + &B10_vec_vsl, + &B11_vec_vsl, + &B12_vec_vsl, + &B13_vec_vsl, + &B14_vec_vsl, + &B15_vec_vsl, + &B16_vec_vsl, + &B17_vec_vsl, + &B18_vec_vsl, + &B19_vec_vsl, + &B20_vec_vsl, + &B21_vec_vsl, + &B22_vec_vsl, + &B23_vec_vsl, + &B24_vec_vsl, + &B25_vec_vsl, + &B26_vec_vsl, + &B27_vec_vsl, + &B28_vec_vsl, + &B29_vec_vsl, + &B30_vec_vsl, + &B1_vec_vslo, + &B2_vec_vslo, + &B3_vec_vslo, + &B4_vec_vslo, + &B5_vec_vslo, + &B6_vec_vslo, + 
&B7_vec_vslo, + &B8_vec_vslo, + &B9_vec_vslo, + &B10_vec_vslo, + &B11_vec_vslo, + &B12_vec_vslo, + &B13_vec_vslo, + &B14_vec_vslo, + &B15_vec_vslo, + &B16_vec_vslo, + &B1_vec_vsplth, + &B1_vec_vspltw, + &B1_vec_vspltb, + &B2_vec_vspltw, + &B2_vec_vsplth, + &B3_vec_vsplth, + &B3_vec_vspltw, + &B2_vec_vspltb, + &B4_vec_vsplth, + &B4_vec_vspltw, + &B3_vec_vspltb, + &B_vec_vspltish, + &B_vec_vspltisw, + &B_vec_vspltisb, + &B_vec_splat_u16, + &B_vec_splat_u32, + &B_vec_splat_u8, + &B1_vec_vsrh, + &B1_vec_vsrw, + &B1_vec_vsrb, + &B2_vec_vsrh, + &B2_vec_vsrw, + &B2_vec_vsrb, + &B1_vec_vsrah, + &B1_vec_vsraw, + &B1_vec_vsrab, + &B2_vec_vsrah, + &B2_vec_vsraw, + &B2_vec_vsrab, + &B1_vec_vsr, + &B2_vec_vsr, + &B3_vec_vsr, + &B4_vec_vsr, + &B5_vec_vsr, + &B6_vec_vsr, + &B7_vec_vsr, + &B8_vec_vsr, + &B9_vec_vsr, + &B10_vec_vsr, + &B11_vec_vsr, + &B12_vec_vsr, + &B13_vec_vsr, + &B14_vec_vsr, + &B15_vec_vsr, + &B16_vec_vsr, + &B17_vec_vsr, + &B18_vec_vsr, + &B19_vec_vsr, + &B20_vec_vsr, + &B21_vec_vsr, + &B22_vec_vsr, + &B23_vec_vsr, + &B24_vec_vsr, + &B25_vec_vsr, + &B26_vec_vsr, + &B27_vec_vsr, + &B28_vec_vsr, + &B29_vec_vsr, + &B30_vec_vsr, + &B1_vec_vsro, + &B2_vec_vsro, + &B3_vec_vsro, + &B4_vec_vsro, + &B5_vec_vsro, + &B6_vec_vsro, + &B7_vec_vsro, + &B8_vec_vsro, + &B9_vec_vsro, + &B10_vec_vsro, + &B11_vec_vsro, + &B12_vec_vsro, + &B13_vec_vsro, + &B14_vec_vsro, + &B15_vec_vsro, + &B16_vec_vsro, + &B1_vec_stvx, + &B2_vec_stvx, + &B3_vec_stvx, + &B4_vec_stvx, + &B5_vec_stvx, + &B6_vec_stvx, + &B7_vec_stvx, + &B8_vec_stvx, + &B9_vec_stvx, + &B10_vec_stvx, + &B11_vec_stvx, + &B12_vec_stvx, + &B13_vec_stvx, + &B14_vec_stvx, + &B15_vec_stvx, + &B16_vec_stvx, + &B17_vec_stvx, + &B18_vec_stvx, + &B19_vec_stvx, + &B20_vec_stvx, + &B21_vec_stvx, + &B22_vec_stvx, + &B23_vec_stvx, + &B24_vec_stvx, + &B25_vec_stvx, + &B26_vec_stvx, + &B27_vec_stvx, + &B28_vec_stvx, + &B29_vec_stvx, + &B30_vec_stvx, + &B1_vec_stvebx, + &B2_vec_stvebx, + &B1_vec_stvewx, + &B2_vec_stvewx, + 
&B3_vec_stvewx, + &B4_vec_stvewx, + &B3_vec_stvebx, + &B4_vec_stvebx, + &B5_vec_stvewx, + &B1_vec_stvehx, + &B2_vec_stvehx, + &B3_vec_stvehx, + &B6_vec_stvewx, + &B7_vec_stvewx, + &B5_vec_stvebx, + &B4_vec_stvehx, + &B8_vec_stvewx, + &B9_vec_stvewx, + &B6_vec_stvebx, + &B1_vec_stvxl, + &B2_vec_stvxl, + &B3_vec_stvxl, + &B4_vec_stvxl, + &B5_vec_stvxl, + &B6_vec_stvxl, + &B7_vec_stvxl, + &B8_vec_stvxl, + &B9_vec_stvxl, + &B10_vec_stvxl, + &B11_vec_stvxl, + &B12_vec_stvxl, + &B13_vec_stvxl, + &B14_vec_stvxl, + &B15_vec_stvxl, + &B16_vec_stvxl, + &B17_vec_stvxl, + &B18_vec_stvxl, + &B19_vec_stvxl, + &B20_vec_stvxl, + &B21_vec_stvxl, + &B22_vec_stvxl, + &B23_vec_stvxl, + &B24_vec_stvxl, + &B25_vec_stvxl, + &B26_vec_stvxl, + &B27_vec_stvxl, + &B28_vec_stvxl, + &B29_vec_stvxl, + &B30_vec_stvxl, + &B1_vec_vsubuhm, + &B2_vec_vsubuhm, + &B1_vec_vsubuwm, + &B2_vec_vsubuwm, + &B1_vec_vsububm, + &B2_vec_vsububm, + &B_vec_vsubfp, + &B3_vec_vsubuhm, + &B4_vec_vsubuhm, + &B3_vec_vsubuwm, + &B4_vec_vsubuwm, + &B3_vec_vsububm, + &B4_vec_vsububm, + &B5_vec_vsubuhm, + &B6_vec_vsubuhm, + &B5_vec_vsubuwm, + &B6_vec_vsubuwm, + &B5_vec_vsububm, + &B6_vec_vsububm, + &B_vec_vsubcuw, + &B1_vec_vsubshs, + &B1_vec_vsubuhs, + &B1_vec_vsubsws, + &B1_vec_vsubuws, + &B1_vec_vsubsbs, + &B1_vec_vsububs, + &B2_vec_vsubshs, + &B3_vec_vsubshs, + &B2_vec_vsubsws, + &B3_vec_vsubsws, + &B2_vec_vsubsbs, + &B3_vec_vsubsbs, + &B2_vec_vsubuhs, + &B3_vec_vsubuhs, + &B2_vec_vsubuws, + &B3_vec_vsubuws, + &B2_vec_vsububs, + &B3_vec_vsububs, + &B_vec_vsum2sws, + &B_vec_vsum4shs, + &B_vec_vsum4sbs, + &B_vec_vsum4ubs, + &B_vec_vsumsws, + &B_vec_vrfiz, + &B1_vec_unpack2sh, + &B2_vec_unpack2sh, + &B1_vec_unpack2sl, + &B2_vec_unpack2sl, + &B1_vec_unpack2uh, + &B2_vec_unpack2uh, + &B1_vec_unpack2ul, + &B2_vec_unpack2ul, + &B1_vec_vupkhsh, + &B1_vec_vupkhsb, + &B_vec_vupkhpx, + &B2_vec_vupkhsh, + &B2_vec_vupkhsb, + &B1_vec_vupklsh, + &B1_vec_vupklsb, + &B_vec_vupklpx, + &B2_vec_vupklsh, + &B2_vec_vupklsb, + &B1_vec_vxor, 
+ &B2_vec_vxor, + &B3_vec_vxor, + &B4_vec_vxor, + &B5_vec_vxor, + &B6_vec_vxor, + &B7_vec_vxor, + &B8_vec_vxor, + &B9_vec_vxor, + &B10_vec_vxor, + &B11_vec_vxor, + &B12_vec_vxor, + &B13_vec_vxor, + &B14_vec_vxor, + &B15_vec_vxor, + &B16_vec_vxor, + &B17_vec_vxor, + &B18_vec_vxor, + &B19_vec_vxor, + &B20_vec_vxor, + &B21_vec_vxor, + &B22_vec_vxor, + &B23_vec_vxor, + &B24_vec_vxor, +}; + +static const struct builtin *const O_vec_abs[4] = { + &B1_vec_abs, + &B2_vec_abs, + &B3_vec_abs, + &B4_vec_abs, +}; +static const struct builtin *const O_vec_abss[3] = { + &B1_vec_abss, + &B2_vec_abss, + &B3_vec_abss, +}; +static const struct builtin *const O_vec_add[19] = { + &B1_vec_vadduhm, + &B2_vec_vadduhm, + &B1_vec_vadduwm, + &B2_vec_vadduwm, + &B1_vec_vaddubm, + &B2_vec_vaddubm, + &B_vec_vaddfp, + &B3_vec_vadduhm, + &B4_vec_vadduhm, + &B3_vec_vadduwm, + &B4_vec_vadduwm, + &B3_vec_vaddubm, + &B4_vec_vaddubm, + &B5_vec_vadduhm, + &B6_vec_vadduhm, + &B5_vec_vadduwm, + &B6_vec_vadduwm, + &B5_vec_vaddubm, + &B6_vec_vaddubm, +}; +static const struct builtin *const O_vec_addc[1] = { + &B_vec_vaddcuw, +}; +static const struct builtin *const O_vec_adds[18] = { + &B1_vec_vaddshs, + &B1_vec_vadduhs, + &B1_vec_vaddsws, + &B1_vec_vadduws, + &B1_vec_vaddsbs, + &B1_vec_vaddubs, + &B2_vec_vaddshs, + &B3_vec_vaddshs, + &B2_vec_vaddsws, + &B3_vec_vaddsws, + &B2_vec_vaddsbs, + &B3_vec_vaddsbs, + &B2_vec_vadduhs, + &B3_vec_vadduhs, + &B2_vec_vadduws, + &B3_vec_vadduws, + &B2_vec_vaddubs, + &B3_vec_vaddubs, +}; +static const struct builtin *const O_vec_all_eq[23] = { + &B1_vec_all_eq, + &B2_vec_all_eq, + &B3_vec_all_eq, + &B4_vec_all_eq, + &B5_vec_all_eq, + &B6_vec_all_eq, + &B7_vec_all_eq, + &B8_vec_all_eq, + &B9_vec_all_eq, + &B10_vec_all_eq, + &B11_vec_all_eq, + &B12_vec_all_eq, + &B13_vec_all_eq, + &B14_vec_all_eq, + &B15_vec_all_eq, + &B16_vec_all_eq, + &B17_vec_all_eq, + &B18_vec_all_eq, + &B19_vec_all_eq, + &B20_vec_all_eq, + &B21_vec_all_eq, + &B22_vec_all_eq, + &B23_vec_all_eq, +}; 
+static const struct builtin *const O_vec_all_ge[19] = { + &B1_vec_all_ge, + &B2_vec_all_ge, + &B3_vec_all_ge, + &B4_vec_all_ge, + &B5_vec_all_ge, + &B6_vec_all_ge, + &B7_vec_all_ge, + &B8_vec_all_ge, + &B9_vec_all_ge, + &B10_vec_all_ge, + &B11_vec_all_ge, + &B12_vec_all_ge, + &B13_vec_all_ge, + &B14_vec_all_ge, + &B15_vec_all_ge, + &B16_vec_all_ge, + &B17_vec_all_ge, + &B18_vec_all_ge, + &B19_vec_all_ge, +}; +static const struct builtin *const O_vec_all_gt[19] = { + &B1_vec_all_gt, + &B2_vec_all_gt, + &B3_vec_all_gt, + &B4_vec_all_gt, + &B5_vec_all_gt, + &B6_vec_all_gt, + &B7_vec_all_gt, + &B8_vec_all_gt, + &B9_vec_all_gt, + &B10_vec_all_gt, + &B11_vec_all_gt, + &B12_vec_all_gt, + &B13_vec_all_gt, + &B14_vec_all_gt, + &B15_vec_all_gt, + &B16_vec_all_gt, + &B17_vec_all_gt, + &B18_vec_all_gt, + &B19_vec_all_gt, +}; +static const struct builtin *const O_vec_all_in[1] = { + &B_vec_all_in, +}; +static const struct builtin *const O_vec_all_le[19] = { + &B1_vec_all_le, + &B2_vec_all_le, + &B3_vec_all_le, + &B4_vec_all_le, + &B5_vec_all_le, + &B6_vec_all_le, + &B7_vec_all_le, + &B8_vec_all_le, + &B9_vec_all_le, + &B10_vec_all_le, + &B11_vec_all_le, + &B12_vec_all_le, + &B13_vec_all_le, + &B14_vec_all_le, + &B15_vec_all_le, + &B16_vec_all_le, + &B17_vec_all_le, + &B18_vec_all_le, + &B19_vec_all_le, +}; +static const struct builtin *const O_vec_all_lt[19] = { + &B1_vec_all_lt, + &B2_vec_all_lt, + &B3_vec_all_lt, + &B4_vec_all_lt, + &B5_vec_all_lt, + &B6_vec_all_lt, + &B7_vec_all_lt, + &B8_vec_all_lt, + &B9_vec_all_lt, + &B10_vec_all_lt, + &B11_vec_all_lt, + &B12_vec_all_lt, + &B13_vec_all_lt, + &B14_vec_all_lt, + &B15_vec_all_lt, + &B16_vec_all_lt, + &B17_vec_all_lt, + &B18_vec_all_lt, + &B19_vec_all_lt, +}; +static const struct builtin *const O_vec_all_nan[1] = { + &B_vec_all_nan, +}; +static const struct builtin *const O_vec_all_ne[23] = { + &B1_vec_all_ne, + &B2_vec_all_ne, + &B3_vec_all_ne, + &B4_vec_all_ne, + &B5_vec_all_ne, + &B6_vec_all_ne, + &B7_vec_all_ne, + 
&B8_vec_all_ne, + &B9_vec_all_ne, + &B10_vec_all_ne, + &B11_vec_all_ne, + &B12_vec_all_ne, + &B13_vec_all_ne, + &B14_vec_all_ne, + &B15_vec_all_ne, + &B16_vec_all_ne, + &B17_vec_all_ne, + &B18_vec_all_ne, + &B19_vec_all_ne, + &B20_vec_all_ne, + &B21_vec_all_ne, + &B22_vec_all_ne, + &B23_vec_all_ne, +}; +static const struct builtin *const O_vec_all_nge[1] = { + &B_vec_all_nge, +}; +static const struct builtin *const O_vec_all_ngt[1] = { + &B_vec_all_ngt, +}; +static const struct builtin *const O_vec_all_nle[1] = { + &B_vec_all_nle, +}; +static const struct builtin *const O_vec_all_nlt[1] = { + &B_vec_all_nlt, +}; +static const struct builtin *const O_vec_all_numeric[1] = { + &B_vec_all_numeric, +}; +static const struct builtin *const O_vec_and[24] = { + &B1_vec_vand, + &B2_vec_vand, + &B3_vec_vand, + &B4_vec_vand, + &B5_vec_vand, + &B6_vec_vand, + &B7_vec_vand, + &B8_vec_vand, + &B9_vec_vand, + &B10_vec_vand, + &B11_vec_vand, + &B12_vec_vand, + &B13_vec_vand, + &B14_vec_vand, + &B15_vec_vand, + &B16_vec_vand, + &B17_vec_vand, + &B18_vec_vand, + &B19_vec_vand, + &B20_vec_vand, + &B21_vec_vand, + &B22_vec_vand, + &B23_vec_vand, + &B24_vec_vand, +}; +static const struct builtin *const O_vec_andc[24] = { + &B1_vec_vandc, + &B2_vec_vandc, + &B3_vec_vandc, + &B4_vec_vandc, + &B5_vec_vandc, + &B6_vec_vandc, + &B7_vec_vandc, + &B8_vec_vandc, + &B9_vec_vandc, + &B10_vec_vandc, + &B11_vec_vandc, + &B12_vec_vandc, + &B13_vec_vandc, + &B14_vec_vandc, + &B15_vec_vandc, + &B16_vec_vandc, + &B17_vec_vandc, + &B18_vec_vandc, + &B19_vec_vandc, + &B20_vec_vandc, + &B21_vec_vandc, + &B22_vec_vandc, + &B23_vec_vandc, + &B24_vec_vandc, +}; +static const struct builtin *const O_vec_any_eq[23] = { + &B1_vec_any_eq, + &B2_vec_any_eq, + &B3_vec_any_eq, + &B4_vec_any_eq, + &B5_vec_any_eq, + &B6_vec_any_eq, + &B7_vec_any_eq, + &B8_vec_any_eq, + &B9_vec_any_eq, + &B10_vec_any_eq, + &B11_vec_any_eq, + &B12_vec_any_eq, + &B13_vec_any_eq, + &B14_vec_any_eq, + &B15_vec_any_eq, + &B16_vec_any_eq, + 
&B17_vec_any_eq, + &B18_vec_any_eq, + &B19_vec_any_eq, + &B20_vec_any_eq, + &B21_vec_any_eq, + &B22_vec_any_eq, + &B23_vec_any_eq, +}; +static const struct builtin *const O_vec_any_ge[19] = { + &B1_vec_any_ge, + &B2_vec_any_ge, + &B3_vec_any_ge, + &B4_vec_any_ge, + &B5_vec_any_ge, + &B6_vec_any_ge, + &B7_vec_any_ge, + &B8_vec_any_ge, + &B9_vec_any_ge, + &B10_vec_any_ge, + &B11_vec_any_ge, + &B12_vec_any_ge, + &B13_vec_any_ge, + &B14_vec_any_ge, + &B15_vec_any_ge, + &B16_vec_any_ge, + &B17_vec_any_ge, + &B18_vec_any_ge, + &B19_vec_any_ge, +}; +static const struct builtin *const O_vec_any_gt[19] = { + &B1_vec_any_gt, + &B2_vec_any_gt, + &B3_vec_any_gt, + &B4_vec_any_gt, + &B5_vec_any_gt, + &B6_vec_any_gt, + &B7_vec_any_gt, + &B8_vec_any_gt, + &B9_vec_any_gt, + &B10_vec_any_gt, + &B11_vec_any_gt, + &B12_vec_any_gt, + &B13_vec_any_gt, + &B14_vec_any_gt, + &B15_vec_any_gt, + &B16_vec_any_gt, + &B17_vec_any_gt, + &B18_vec_any_gt, + &B19_vec_any_gt, +}; +static const struct builtin *const O_vec_any_le[19] = { + &B1_vec_any_le, + &B2_vec_any_le, + &B3_vec_any_le, + &B4_vec_any_le, + &B5_vec_any_le, + &B6_vec_any_le, + &B7_vec_any_le, + &B8_vec_any_le, + &B9_vec_any_le, + &B10_vec_any_le, + &B11_vec_any_le, + &B12_vec_any_le, + &B13_vec_any_le, + &B14_vec_any_le, + &B15_vec_any_le, + &B16_vec_any_le, + &B17_vec_any_le, + &B18_vec_any_le, + &B19_vec_any_le, +}; +static const struct builtin *const O_vec_any_lt[19] = { + &B1_vec_any_lt, + &B2_vec_any_lt, + &B3_vec_any_lt, + &B4_vec_any_lt, + &B5_vec_any_lt, + &B6_vec_any_lt, + &B7_vec_any_lt, + &B8_vec_any_lt, + &B9_vec_any_lt, + &B10_vec_any_lt, + &B11_vec_any_lt, + &B12_vec_any_lt, + &B13_vec_any_lt, + &B14_vec_any_lt, + &B15_vec_any_lt, + &B16_vec_any_lt, + &B17_vec_any_lt, + &B18_vec_any_lt, + &B19_vec_any_lt, +}; +static const struct builtin *const O_vec_any_nan[1] = { + &B_vec_any_nan, +}; +static const struct builtin *const O_vec_any_ne[23] = { + &B1_vec_any_ne, + &B2_vec_any_ne, + &B3_vec_any_ne, + &B4_vec_any_ne, + 
&B5_vec_any_ne, + &B6_vec_any_ne, + &B7_vec_any_ne, + &B8_vec_any_ne, + &B9_vec_any_ne, + &B10_vec_any_ne, + &B11_vec_any_ne, + &B12_vec_any_ne, + &B13_vec_any_ne, + &B14_vec_any_ne, + &B15_vec_any_ne, + &B16_vec_any_ne, + &B17_vec_any_ne, + &B18_vec_any_ne, + &B19_vec_any_ne, + &B20_vec_any_ne, + &B21_vec_any_ne, + &B22_vec_any_ne, + &B23_vec_any_ne, +}; +static const struct builtin *const O_vec_any_nge[1] = { + &B_vec_any_nge, +}; +static const struct builtin *const O_vec_any_ngt[1] = { + &B_vec_any_ngt, +}; +static const struct builtin *const O_vec_any_nle[1] = { + &B_vec_any_nle, +}; +static const struct builtin *const O_vec_any_nlt[1] = { + &B_vec_any_nlt, +}; +static const struct builtin *const O_vec_any_numeric[1] = { + &B_vec_any_numeric, +}; +static const struct builtin *const O_vec_any_out[1] = { + &B_vec_any_out, +}; +static const struct builtin *const O_vec_avg[6] = { + &B_vec_vavgsh, + &B_vec_vavgsw, + &B_vec_vavgsb, + &B_vec_vavguh, + &B_vec_vavguw, + &B_vec_vavgub, +}; +static const struct builtin *const O_vec_ceil[1] = { + &B_vec_vrfip, +}; +static const struct builtin *const O_vec_cmpb[1] = { + &B_vec_vcmpbfp, +}; +static const struct builtin *const O_vec_cmpeq[7] = { + &B_vec_vcmpeqfp, + &B1_vec_vcmpequh, + &B1_vec_vcmpequw, + &B1_vec_vcmpequb, + &B2_vec_vcmpequh, + &B2_vec_vcmpequw, + &B2_vec_vcmpequb, +}; +static const struct builtin *const O_vec_cmpge[1] = { + &B_vec_vcmpgefp, +}; +static const struct builtin *const O_vec_cmpgt[7] = { + &B_vec_vcmpgtfp, + &B_vec_vcmpgtsh, + &B_vec_vcmpgtsw, + &B_vec_vcmpgtsb, + &B_vec_vcmpgtuh, + &B_vec_vcmpgtuw, + &B_vec_vcmpgtub, +}; +static const struct builtin *const O_vec_cmple[1] = { + &B_vec_cmple, +}; +static const struct builtin *const O_vec_cmplt[7] = { + &B1_vec_cmplt, + &B2_vec_cmplt, + &B3_vec_cmplt, + &B4_vec_cmplt, + &B5_vec_cmplt, + &B6_vec_cmplt, + &B7_vec_cmplt, +}; +static const struct builtin *const O_vec_ctf[2] = { + &B_vec_vcfsx, + &B_vec_vcfux, +}; +static const struct builtin *const 
O_vec_cts[1] = { + &B_vec_vctsxs, +}; +static const struct builtin *const O_vec_ctu[1] = { + &B_vec_vctuxs, +}; +static const struct builtin *const O_vec_dss[1] = { + &B_vec_dss, +}; +static const struct builtin *const O_vec_dssall[1] = { + &B_vec_dssall, +}; +static const struct builtin *const O_vec_dst[20] = { + &B1_vec_dst, + &B2_vec_dst, + &B3_vec_dst, + &B4_vec_dst, + &B5_vec_dst, + &B6_vec_dst, + &B7_vec_dst, + &B8_vec_dst, + &B9_vec_dst, + &B10_vec_dst, + &B11_vec_dst, + &B12_vec_dst, + &B13_vec_dst, + &B14_vec_dst, + &B15_vec_dst, + &B16_vec_dst, + &B17_vec_dst, + &B18_vec_dst, + &B19_vec_dst, + &B20_vec_dst, +}; +static const struct builtin *const O_vec_dstst[20] = { + &B1_vec_dstst, + &B2_vec_dstst, + &B3_vec_dstst, + &B4_vec_dstst, + &B5_vec_dstst, + &B6_vec_dstst, + &B7_vec_dstst, + &B8_vec_dstst, + &B9_vec_dstst, + &B10_vec_dstst, + &B11_vec_dstst, + &B12_vec_dstst, + &B13_vec_dstst, + &B14_vec_dstst, + &B15_vec_dstst, + &B16_vec_dstst, + &B17_vec_dstst, + &B18_vec_dstst, + &B19_vec_dstst, + &B20_vec_dstst, +}; +static const struct builtin *const O_vec_dststt[20] = { + &B1_vec_dststt, + &B2_vec_dststt, + &B3_vec_dststt, + &B4_vec_dststt, + &B5_vec_dststt, + &B6_vec_dststt, + &B7_vec_dststt, + &B8_vec_dststt, + &B9_vec_dststt, + &B10_vec_dststt, + &B11_vec_dststt, + &B12_vec_dststt, + &B13_vec_dststt, + &B14_vec_dststt, + &B15_vec_dststt, + &B16_vec_dststt, + &B17_vec_dststt, + &B18_vec_dststt, + &B19_vec_dststt, + &B20_vec_dststt, +}; +static const struct builtin *const O_vec_dstt[20] = { + &B1_vec_dstt, + &B2_vec_dstt, + &B3_vec_dstt, + &B4_vec_dstt, + &B5_vec_dstt, + &B6_vec_dstt, + &B7_vec_dstt, + &B8_vec_dstt, + &B9_vec_dstt, + &B10_vec_dstt, + &B11_vec_dstt, + &B12_vec_dstt, + &B13_vec_dstt, + &B14_vec_dstt, + &B15_vec_dstt, + &B16_vec_dstt, + &B17_vec_dstt, + &B18_vec_dstt, + &B19_vec_dstt, + &B20_vec_dstt, +}; +static const struct builtin *const O_vec_expte[1] = { + &B_vec_vexptefp, +}; +static const struct builtin *const O_vec_floor[1] = { + 
&B_vec_vrfim, +}; +static const struct builtin *const O_vec_ld[20] = { + &B1_vec_lvx, + &B2_vec_lvx, + &B3_vec_lvx, + &B4_vec_lvx, + &B5_vec_lvx, + &B6_vec_lvx, + &B7_vec_lvx, + &B8_vec_lvx, + &B9_vec_lvx, + &B10_vec_lvx, + &B11_vec_lvx, + &B12_vec_lvx, + &B13_vec_lvx, + &B14_vec_lvx, + &B15_vec_lvx, + &B16_vec_lvx, + &B17_vec_lvx, + &B18_vec_lvx, + &B19_vec_lvx, + &B20_vec_lvx, +}; +static const struct builtin *const O_vec_lde[9] = { + &B1_vec_lvewx, + &B2_vec_lvewx, + &B3_vec_lvewx, + &B1_vec_lvehx, + &B1_vec_lvebx, + &B2_vec_lvebx, + &B4_vec_lvewx, + &B5_vec_lvewx, + &B2_vec_lvehx, +}; +static const struct builtin *const O_vec_ldl[20] = { + &B1_vec_lvxl, + &B2_vec_lvxl, + &B3_vec_lvxl, + &B4_vec_lvxl, + &B5_vec_lvxl, + &B6_vec_lvxl, + &B7_vec_lvxl, + &B8_vec_lvxl, + &B9_vec_lvxl, + &B10_vec_lvxl, + &B11_vec_lvxl, + &B12_vec_lvxl, + &B13_vec_lvxl, + &B14_vec_lvxl, + &B15_vec_lvxl, + &B16_vec_lvxl, + &B17_vec_lvxl, + &B18_vec_lvxl, + &B19_vec_lvxl, + &B20_vec_lvxl, +}; +static const struct builtin *const O_vec_loge[1] = { + &B_vec_vlogefp, +}; +static const struct builtin *const O_vec_lvebx[2] = { + &B1_vec_lvebx, + &B2_vec_lvebx, +}; +static const struct builtin *const O_vec_lvehx[2] = { + &B1_vec_lvehx, + &B2_vec_lvehx, +}; +static const struct builtin *const O_vec_lvewx[5] = { + &B1_vec_lvewx, + &B2_vec_lvewx, + &B3_vec_lvewx, + &B4_vec_lvewx, + &B5_vec_lvewx, +}; +static const struct builtin *const O_vec_lvsl[9] = { + &B1_vec_lvsl, + &B2_vec_lvsl, + &B3_vec_lvsl, + &B4_vec_lvsl, + &B5_vec_lvsl, + &B6_vec_lvsl, + &B7_vec_lvsl, + &B8_vec_lvsl, + &B9_vec_lvsl, +}; +static const struct builtin *const O_vec_lvsr[9] = { + &B1_vec_lvsr, + &B2_vec_lvsr, + &B3_vec_lvsr, + &B4_vec_lvsr, + &B5_vec_lvsr, + &B6_vec_lvsr, + &B7_vec_lvsr, + &B8_vec_lvsr, + &B9_vec_lvsr, +}; +static const struct builtin *const O_vec_lvx[20] = { + &B1_vec_lvx, + &B2_vec_lvx, + &B3_vec_lvx, + &B4_vec_lvx, + &B5_vec_lvx, + &B6_vec_lvx, + &B7_vec_lvx, + &B8_vec_lvx, + &B9_vec_lvx, + &B10_vec_lvx, 
+ &B11_vec_lvx, + &B12_vec_lvx, + &B13_vec_lvx, + &B14_vec_lvx, + &B15_vec_lvx, + &B16_vec_lvx, + &B17_vec_lvx, + &B18_vec_lvx, + &B19_vec_lvx, + &B20_vec_lvx, +}; +static const struct builtin *const O_vec_lvxl[20] = { + &B1_vec_lvxl, + &B2_vec_lvxl, + &B3_vec_lvxl, + &B4_vec_lvxl, + &B5_vec_lvxl, + &B6_vec_lvxl, + &B7_vec_lvxl, + &B8_vec_lvxl, + &B9_vec_lvxl, + &B10_vec_lvxl, + &B11_vec_lvxl, + &B12_vec_lvxl, + &B13_vec_lvxl, + &B14_vec_lvxl, + &B15_vec_lvxl, + &B16_vec_lvxl, + &B17_vec_lvxl, + &B18_vec_lvxl, + &B19_vec_lvxl, + &B20_vec_lvxl, +}; +static const struct builtin *const O_vec_madd[1] = { + &B_vec_vmaddfp, +}; +static const struct builtin *const O_vec_madds[1] = { + &B_vec_vmhaddshs, +}; +static const struct builtin *const O_vec_max[19] = { + &B1_vec_vmaxsh, + &B1_vec_vmaxuh, + &B1_vec_vmaxsw, + &B1_vec_vmaxuw, + &B1_vec_vmaxsb, + &B1_vec_vmaxub, + &B_vec_vmaxfp, + &B2_vec_vmaxsh, + &B3_vec_vmaxsh, + &B2_vec_vmaxsw, + &B3_vec_vmaxsw, + &B2_vec_vmaxsb, + &B3_vec_vmaxsb, + &B2_vec_vmaxuh, + &B3_vec_vmaxuh, + &B2_vec_vmaxuw, + &B3_vec_vmaxuw, + &B2_vec_vmaxub, + &B3_vec_vmaxub, +}; +static const struct builtin *const O_vec_mergeh[11] = { + &B1_vec_vmrghh, + &B1_vec_vmrghw, + &B1_vec_vmrghb, + &B2_vec_vmrghw, + &B2_vec_vmrghh, + &B3_vec_vmrghh, + &B3_vec_vmrghw, + &B2_vec_vmrghb, + &B4_vec_vmrghh, + &B4_vec_vmrghw, + &B3_vec_vmrghb, +}; +static const struct builtin *const O_vec_mergel[11] = { + &B1_vec_vmrglh, + &B1_vec_vmrglw, + &B1_vec_vmrglb, + &B2_vec_vmrglw, + &B2_vec_vmrglh, + &B3_vec_vmrglh, + &B3_vec_vmrglw, + &B2_vec_vmrglb, + &B4_vec_vmrglh, + &B4_vec_vmrglw, + &B3_vec_vmrglb, +}; +static const struct builtin *const O_vec_mfvscr[1] = { + &B_vec_mfvscr, +}; +static const struct builtin *const O_vec_min[19] = { + &B1_vec_vminsh, + &B1_vec_vminuh, + &B1_vec_vminsw, + &B1_vec_vminuw, + &B1_vec_vminsb, + &B1_vec_vminub, + &B_vec_vminfp, + &B2_vec_vminsh, + &B3_vec_vminsh, + &B2_vec_vminsw, + &B3_vec_vminsw, + &B2_vec_vminsb, + &B3_vec_vminsb, + 
&B2_vec_vminuh, + &B3_vec_vminuh, + &B2_vec_vminuw, + &B3_vec_vminuw, + &B2_vec_vminub, + &B3_vec_vminub, +}; +static const struct builtin *const O_vec_mladd[4] = { + &B1_vec_vmladduhm, + &B2_vec_vmladduhm, + &B3_vec_vmladduhm, + &B4_vec_vmladduhm, +}; +static const struct builtin *const O_vec_mradds[1] = { + &B_vec_vmhraddshs, +}; +static const struct builtin *const O_vec_msum[4] = { + &B_vec_vmsumshm, + &B_vec_vmsummbm, + &B_vec_vmsumuhm, + &B_vec_vmsumubm, +}; +static const struct builtin *const O_vec_msums[2] = { + &B_vec_vmsumshs, + &B_vec_vmsumuhs, +}; +static const struct builtin *const O_vec_mtvscr[10] = { + &B1_vec_mtvscr, + &B2_vec_mtvscr, + &B3_vec_mtvscr, + &B4_vec_mtvscr, + &B5_vec_mtvscr, + &B6_vec_mtvscr, + &B7_vec_mtvscr, + &B8_vec_mtvscr, + &B9_vec_mtvscr, + &B10_vec_mtvscr, +}; +static const struct builtin *const O_vec_mule[4] = { + &B_vec_vmulesh, + &B_vec_vmulesb, + &B_vec_vmuleuh, + &B_vec_vmuleub, +}; +static const struct builtin *const O_vec_mulo[4] = { + &B_vec_vmulosh, + &B_vec_vmulosb, + &B_vec_vmulouh, + &B_vec_vmuloub, +}; +static const struct builtin *const O_vec_nmsub[1] = { + &B_vec_vnmsubfp, +}; +static const struct builtin *const O_vec_nor[10] = { + &B1_vec_vnor, + &B2_vec_vnor, + &B3_vec_vnor, + &B4_vec_vnor, + &B5_vec_vnor, + &B6_vec_vnor, + &B7_vec_vnor, + &B8_vec_vnor, + &B9_vec_vnor, + &B10_vec_vnor, +}; +static const struct builtin *const O_vec_or[24] = { + &B1_vec_vor, + &B2_vec_vor, + &B3_vec_vor, + &B4_vec_vor, + &B5_vec_vor, + &B6_vec_vor, + &B7_vec_vor, + &B8_vec_vor, + &B9_vec_vor, + &B10_vec_vor, + &B11_vec_vor, + &B12_vec_vor, + &B13_vec_vor, + &B14_vec_vor, + &B15_vec_vor, + &B16_vec_vor, + &B17_vec_vor, + &B18_vec_vor, + &B19_vec_vor, + &B20_vec_vor, + &B21_vec_vor, + &B22_vec_vor, + &B23_vec_vor, + &B24_vec_vor, +}; +static const struct builtin *const O_vec_pack[6] = { + &B1_vec_vpkuhum, + &B1_vec_vpkuwum, + &B2_vec_vpkuhum, + &B2_vec_vpkuwum, + &B3_vec_vpkuhum, + &B3_vec_vpkuwum, +}; +static const struct builtin 
*const O_vec_packpx[1] = { + &B_vec_vpkpx, +}; +static const struct builtin *const O_vec_packs[4] = { + &B_vec_vpkshss, + &B_vec_vpkswss, + &B_vec_vpkuhus, + &B_vec_vpkuwus, +}; +static const struct builtin *const O_vec_packsu[4] = { + &B_vec_vpkshus, + &B_vec_vpkswus, + &B_vec_vpkuhus, + &B_vec_vpkuwus, +}; +static const struct builtin *const O_vec_perm[11] = { + &B1_vec_vperm, + &B2_vec_vperm, + &B3_vec_vperm, + &B4_vec_vperm, + &B5_vec_vperm, + &B6_vec_vperm, + &B7_vec_vperm, + &B8_vec_vperm, + &B9_vec_vperm, + &B10_vec_vperm, + &B11_vec_vperm, +}; +static const struct builtin *const O_vec_re[1] = { + &B_vec_vrefp, +}; +static const struct builtin *const O_vec_rl[6] = { + &B1_vec_vrlh, + &B1_vec_vrlw, + &B1_vec_vrlb, + &B2_vec_vrlh, + &B2_vec_vrlw, + &B2_vec_vrlb, +}; +static const struct builtin *const O_vec_round[1] = { + &B_vec_vrfin, +}; +static const struct builtin *const O_vec_rsqrte[1] = { + &B_vec_vrsqrtefp, +}; +static const struct builtin *const O_vec_sel[20] = { + &B1_vec_vsel, + &B2_vec_vsel, + &B3_vec_vsel, + &B4_vec_vsel, + &B5_vec_vsel, + &B6_vec_vsel, + &B7_vec_vsel, + &B8_vec_vsel, + &B9_vec_vsel, + &B10_vec_vsel, + &B11_vec_vsel, + &B12_vec_vsel, + &B13_vec_vsel, + &B14_vec_vsel, + &B15_vec_vsel, + &B16_vec_vsel, + &B17_vec_vsel, + &B18_vec_vsel, + &B19_vec_vsel, + &B20_vec_vsel, +}; +static const struct builtin *const O_vec_sl[6] = { + &B1_vec_vslh, + &B1_vec_vslw, + &B1_vec_vslb, + &B2_vec_vslh, + &B2_vec_vslw, + &B2_vec_vslb, +}; +static const struct builtin *const O_vec_sld[11] = { + &B1_vec_vsldoi, + &B2_vec_vsldoi, + &B3_vec_vsldoi, + &B4_vec_vsldoi, + &B5_vec_vsldoi, + &B6_vec_vsldoi, + &B7_vec_vsldoi, + &B8_vec_vsldoi, + &B9_vec_vsldoi, + &B10_vec_vsldoi, + &B11_vec_vsldoi, +}; +static const struct builtin *const O_vec_sll[30] = { + &B1_vec_vsl, + &B2_vec_vsl, + &B3_vec_vsl, + &B4_vec_vsl, + &B5_vec_vsl, + &B6_vec_vsl, + &B7_vec_vsl, + &B8_vec_vsl, + &B9_vec_vsl, + &B10_vec_vsl, + &B11_vec_vsl, + &B12_vec_vsl, + &B13_vec_vsl, + 
&B14_vec_vsl, + &B15_vec_vsl, + &B16_vec_vsl, + &B17_vec_vsl, + &B18_vec_vsl, + &B19_vec_vsl, + &B20_vec_vsl, + &B21_vec_vsl, + &B22_vec_vsl, + &B23_vec_vsl, + &B24_vec_vsl, + &B25_vec_vsl, + &B26_vec_vsl, + &B27_vec_vsl, + &B28_vec_vsl, + &B29_vec_vsl, + &B30_vec_vsl, +}; +static const struct builtin *const O_vec_slo[16] = { + &B1_vec_vslo, + &B2_vec_vslo, + &B3_vec_vslo, + &B4_vec_vslo, + &B5_vec_vslo, + &B6_vec_vslo, + &B7_vec_vslo, + &B8_vec_vslo, + &B9_vec_vslo, + &B10_vec_vslo, + &B11_vec_vslo, + &B12_vec_vslo, + &B13_vec_vslo, + &B14_vec_vslo, + &B15_vec_vslo, + &B16_vec_vslo, +}; +static const struct builtin *const O_vec_splat[11] = { + &B1_vec_vsplth, + &B1_vec_vspltw, + &B1_vec_vspltb, + &B2_vec_vspltw, + &B2_vec_vsplth, + &B3_vec_vsplth, + &B3_vec_vspltw, + &B2_vec_vspltb, + &B4_vec_vsplth, + &B4_vec_vspltw, + &B3_vec_vspltb, +}; +static const struct builtin *const O_vec_splat_s16[1] = { + &B_vec_vspltish, +}; +static const struct builtin *const O_vec_splat_s32[1] = { + &B_vec_vspltisw, +}; +static const struct builtin *const O_vec_splat_s8[1] = { + &B_vec_vspltisb, +}; +static const struct builtin *const O_vec_splat_u16[1] = { + &B_vec_splat_u16, +}; +static const struct builtin *const O_vec_splat_u32[1] = { + &B_vec_splat_u32, +}; +static const struct builtin *const O_vec_splat_u8[1] = { + &B_vec_splat_u8, +}; +static const struct builtin *const O_vec_sr[6] = { + &B1_vec_vsrh, + &B1_vec_vsrw, + &B1_vec_vsrb, + &B2_vec_vsrh, + &B2_vec_vsrw, + &B2_vec_vsrb, +}; +static const struct builtin *const O_vec_sra[6] = { + &B1_vec_vsrah, + &B1_vec_vsraw, + &B1_vec_vsrab, + &B2_vec_vsrah, + &B2_vec_vsraw, + &B2_vec_vsrab, +}; +static const struct builtin *const O_vec_srl[30] = { + &B1_vec_vsr, + &B2_vec_vsr, + &B3_vec_vsr, + &B4_vec_vsr, + &B5_vec_vsr, + &B6_vec_vsr, + &B7_vec_vsr, + &B8_vec_vsr, + &B9_vec_vsr, + &B10_vec_vsr, + &B11_vec_vsr, + &B12_vec_vsr, + &B13_vec_vsr, + &B14_vec_vsr, + &B15_vec_vsr, + &B16_vec_vsr, + &B17_vec_vsr, + &B18_vec_vsr, + 
&B19_vec_vsr, + &B20_vec_vsr, + &B21_vec_vsr, + &B22_vec_vsr, + &B23_vec_vsr, + &B24_vec_vsr, + &B25_vec_vsr, + &B26_vec_vsr, + &B27_vec_vsr, + &B28_vec_vsr, + &B29_vec_vsr, + &B30_vec_vsr, +}; +static const struct builtin *const O_vec_sro[16] = { + &B1_vec_vsro, + &B2_vec_vsro, + &B3_vec_vsro, + &B4_vec_vsro, + &B5_vec_vsro, + &B6_vec_vsro, + &B7_vec_vsro, + &B8_vec_vsro, + &B9_vec_vsro, + &B10_vec_vsro, + &B11_vec_vsro, + &B12_vec_vsro, + &B13_vec_vsro, + &B14_vec_vsro, + &B15_vec_vsro, + &B16_vec_vsro, +}; +static const struct builtin *const O_vec_st[30] = { + &B1_vec_stvx, + &B2_vec_stvx, + &B3_vec_stvx, + &B4_vec_stvx, + &B5_vec_stvx, + &B6_vec_stvx, + &B7_vec_stvx, + &B8_vec_stvx, + &B9_vec_stvx, + &B10_vec_stvx, + &B11_vec_stvx, + &B12_vec_stvx, + &B13_vec_stvx, + &B14_vec_stvx, + &B15_vec_stvx, + &B16_vec_stvx, + &B17_vec_stvx, + &B18_vec_stvx, + &B19_vec_stvx, + &B20_vec_stvx, + &B21_vec_stvx, + &B22_vec_stvx, + &B23_vec_stvx, + &B24_vec_stvx, + &B25_vec_stvx, + &B26_vec_stvx, + &B27_vec_stvx, + &B28_vec_stvx, + &B29_vec_stvx, + &B30_vec_stvx, +}; +static const struct builtin *const O_vec_ste[19] = { + &B1_vec_stvebx, + &B2_vec_stvebx, + &B1_vec_stvewx, + &B2_vec_stvewx, + &B3_vec_stvewx, + &B4_vec_stvewx, + &B3_vec_stvebx, + &B4_vec_stvebx, + &B5_vec_stvewx, + &B1_vec_stvehx, + &B2_vec_stvehx, + &B3_vec_stvehx, + &B6_vec_stvewx, + &B7_vec_stvewx, + &B5_vec_stvebx, + &B4_vec_stvehx, + &B8_vec_stvewx, + &B9_vec_stvewx, + &B6_vec_stvebx, +}; +static const struct builtin *const O_vec_stl[30] = { + &B1_vec_stvxl, + &B2_vec_stvxl, + &B3_vec_stvxl, + &B4_vec_stvxl, + &B5_vec_stvxl, + &B6_vec_stvxl, + &B7_vec_stvxl, + &B8_vec_stvxl, + &B9_vec_stvxl, + &B10_vec_stvxl, + &B11_vec_stvxl, + &B12_vec_stvxl, + &B13_vec_stvxl, + &B14_vec_stvxl, + &B15_vec_stvxl, + &B16_vec_stvxl, + &B17_vec_stvxl, + &B18_vec_stvxl, + &B19_vec_stvxl, + &B20_vec_stvxl, + &B21_vec_stvxl, + &B22_vec_stvxl, + &B23_vec_stvxl, + &B24_vec_stvxl, + &B25_vec_stvxl, + &B26_vec_stvxl, + 
&B27_vec_stvxl, + &B28_vec_stvxl, + &B29_vec_stvxl, + &B30_vec_stvxl, +}; +static const struct builtin *const O_vec_stvebx[6] = { + &B1_vec_stvebx, + &B2_vec_stvebx, + &B3_vec_stvebx, + &B4_vec_stvebx, + &B5_vec_stvebx, + &B6_vec_stvebx, +}; +static const struct builtin *const O_vec_stvehx[4] = { + &B1_vec_stvehx, + &B2_vec_stvehx, + &B3_vec_stvehx, + &B4_vec_stvehx, +}; +static const struct builtin *const O_vec_stvewx[9] = { + &B1_vec_stvewx, + &B2_vec_stvewx, + &B3_vec_stvewx, + &B4_vec_stvewx, + &B5_vec_stvewx, + &B6_vec_stvewx, + &B7_vec_stvewx, + &B8_vec_stvewx, + &B9_vec_stvewx, +}; +static const struct builtin *const O_vec_stvx[30] = { + &B1_vec_stvx, + &B2_vec_stvx, + &B3_vec_stvx, + &B4_vec_stvx, + &B5_vec_stvx, + &B6_vec_stvx, + &B7_vec_stvx, + &B8_vec_stvx, + &B9_vec_stvx, + &B10_vec_stvx, + &B11_vec_stvx, + &B12_vec_stvx, + &B13_vec_stvx, + &B14_vec_stvx, + &B15_vec_stvx, + &B16_vec_stvx, + &B17_vec_stvx, + &B18_vec_stvx, + &B19_vec_stvx, + &B20_vec_stvx, + &B21_vec_stvx, + &B22_vec_stvx, + &B23_vec_stvx, + &B24_vec_stvx, + &B25_vec_stvx, + &B26_vec_stvx, + &B27_vec_stvx, + &B28_vec_stvx, + &B29_vec_stvx, + &B30_vec_stvx, +}; +static const struct builtin *const O_vec_stvxl[30] = { + &B1_vec_stvxl, + &B2_vec_stvxl, + &B3_vec_stvxl, + &B4_vec_stvxl, + &B5_vec_stvxl, + &B6_vec_stvxl, + &B7_vec_stvxl, + &B8_vec_stvxl, + &B9_vec_stvxl, + &B10_vec_stvxl, + &B11_vec_stvxl, + &B12_vec_stvxl, + &B13_vec_stvxl, + &B14_vec_stvxl, + &B15_vec_stvxl, + &B16_vec_stvxl, + &B17_vec_stvxl, + &B18_vec_stvxl, + &B19_vec_stvxl, + &B20_vec_stvxl, + &B21_vec_stvxl, + &B22_vec_stvxl, + &B23_vec_stvxl, + &B24_vec_stvxl, + &B25_vec_stvxl, + &B26_vec_stvxl, + &B27_vec_stvxl, + &B28_vec_stvxl, + &B29_vec_stvxl, + &B30_vec_stvxl, +}; +static const struct builtin *const O_vec_sub[19] = { + &B1_vec_vsubuhm, + &B2_vec_vsubuhm, + &B1_vec_vsubuwm, + &B2_vec_vsubuwm, + &B1_vec_vsububm, + &B2_vec_vsububm, + &B_vec_vsubfp, + &B3_vec_vsubuhm, + &B4_vec_vsubuhm, + &B3_vec_vsubuwm, + 
&B4_vec_vsubuwm, + &B3_vec_vsububm, + &B4_vec_vsububm, + &B5_vec_vsubuhm, + &B6_vec_vsubuhm, + &B5_vec_vsubuwm, + &B6_vec_vsubuwm, + &B5_vec_vsububm, + &B6_vec_vsububm, +}; +static const struct builtin *const O_vec_subc[1] = { + &B_vec_vsubcuw, +}; +static const struct builtin *const O_vec_subs[18] = { + &B1_vec_vsubshs, + &B1_vec_vsubuhs, + &B1_vec_vsubsws, + &B1_vec_vsubuws, + &B1_vec_vsubsbs, + &B1_vec_vsububs, + &B2_vec_vsubshs, + &B3_vec_vsubshs, + &B2_vec_vsubsws, + &B3_vec_vsubsws, + &B2_vec_vsubsbs, + &B3_vec_vsubsbs, + &B2_vec_vsubuhs, + &B3_vec_vsubuhs, + &B2_vec_vsubuws, + &B3_vec_vsubuws, + &B2_vec_vsububs, + &B3_vec_vsububs, +}; +static const struct builtin *const O_vec_sum2s[1] = { + &B_vec_vsum2sws, +}; +static const struct builtin *const O_vec_sum4s[3] = { + &B_vec_vsum4shs, + &B_vec_vsum4sbs, + &B_vec_vsum4ubs, +}; +static const struct builtin *const O_vec_sums[1] = { + &B_vec_vsumsws, +}; +static const struct builtin *const O_vec_trunc[1] = { + &B_vec_vrfiz, +}; +static const struct builtin *const O_vec_unpack2sh[2] = { + &B1_vec_unpack2sh, + &B2_vec_unpack2sh, +}; +static const struct builtin *const O_vec_unpack2sl[2] = { + &B1_vec_unpack2sl, + &B2_vec_unpack2sl, +}; +static const struct builtin *const O_vec_unpack2uh[2] = { + &B1_vec_unpack2uh, + &B2_vec_unpack2uh, +}; +static const struct builtin *const O_vec_unpack2ul[2] = { + &B1_vec_unpack2ul, + &B2_vec_unpack2ul, +}; +static const struct builtin *const O_vec_unpackh[5] = { + &B1_vec_vupkhsh, + &B1_vec_vupkhsb, + &B_vec_vupkhpx, + &B2_vec_vupkhsh, + &B2_vec_vupkhsb, +}; +static const struct builtin *const O_vec_unpackl[5] = { + &B1_vec_vupklsh, + &B1_vec_vupklsb, + &B_vec_vupklpx, + &B2_vec_vupklsh, + &B2_vec_vupklsb, +}; +static const struct builtin *const O_vec_vaddcuw[1] = { + &B_vec_vaddcuw, +}; +static const struct builtin *const O_vec_vaddfp[1] = { + &B_vec_vaddfp, +}; +static const struct builtin *const O_vec_vaddsbs[3] = { + &B1_vec_vaddsbs, + &B2_vec_vaddsbs, + &B3_vec_vaddsbs, +}; 
+static const struct builtin *const O_vec_vaddshs[3] = { + &B1_vec_vaddshs, + &B2_vec_vaddshs, + &B3_vec_vaddshs, +}; +static const struct builtin *const O_vec_vaddsws[3] = { + &B1_vec_vaddsws, + &B2_vec_vaddsws, + &B3_vec_vaddsws, +}; +static const struct builtin *const O_vec_vaddubm[6] = { + &B1_vec_vaddubm, + &B2_vec_vaddubm, + &B3_vec_vaddubm, + &B4_vec_vaddubm, + &B5_vec_vaddubm, + &B6_vec_vaddubm, +}; +static const struct builtin *const O_vec_vaddubs[3] = { + &B1_vec_vaddubs, + &B2_vec_vaddubs, + &B3_vec_vaddubs, +}; +static const struct builtin *const O_vec_vadduhm[6] = { + &B1_vec_vadduhm, + &B2_vec_vadduhm, + &B3_vec_vadduhm, + &B4_vec_vadduhm, + &B5_vec_vadduhm, + &B6_vec_vadduhm, +}; +static const struct builtin *const O_vec_vadduhs[3] = { + &B1_vec_vadduhs, + &B2_vec_vadduhs, + &B3_vec_vadduhs, +}; +static const struct builtin *const O_vec_vadduwm[6] = { + &B1_vec_vadduwm, + &B2_vec_vadduwm, + &B3_vec_vadduwm, + &B4_vec_vadduwm, + &B5_vec_vadduwm, + &B6_vec_vadduwm, +}; +static const struct builtin *const O_vec_vadduws[3] = { + &B1_vec_vadduws, + &B2_vec_vadduws, + &B3_vec_vadduws, +}; +static const struct builtin *const O_vec_vand[24] = { + &B1_vec_vand, + &B2_vec_vand, + &B3_vec_vand, + &B4_vec_vand, + &B5_vec_vand, + &B6_vec_vand, + &B7_vec_vand, + &B8_vec_vand, + &B9_vec_vand, + &B10_vec_vand, + &B11_vec_vand, + &B12_vec_vand, + &B13_vec_vand, + &B14_vec_vand, + &B15_vec_vand, + &B16_vec_vand, + &B17_vec_vand, + &B18_vec_vand, + &B19_vec_vand, + &B20_vec_vand, + &B21_vec_vand, + &B22_vec_vand, + &B23_vec_vand, + &B24_vec_vand, +}; +static const struct builtin *const O_vec_vandc[24] = { + &B1_vec_vandc, + &B2_vec_vandc, + &B3_vec_vandc, + &B4_vec_vandc, + &B5_vec_vandc, + &B6_vec_vandc, + &B7_vec_vandc, + &B8_vec_vandc, + &B9_vec_vandc, + &B10_vec_vandc, + &B11_vec_vandc, + &B12_vec_vandc, + &B13_vec_vandc, + &B14_vec_vandc, + &B15_vec_vandc, + &B16_vec_vandc, + &B17_vec_vandc, + &B18_vec_vandc, + &B19_vec_vandc, + &B20_vec_vandc, + &B21_vec_vandc, + 
&B22_vec_vandc, + &B23_vec_vandc, + &B24_vec_vandc, +}; +static const struct builtin *const O_vec_vavgsb[1] = { + &B_vec_vavgsb, +}; +static const struct builtin *const O_vec_vavgsh[1] = { + &B_vec_vavgsh, +}; +static const struct builtin *const O_vec_vavgsw[1] = { + &B_vec_vavgsw, +}; +static const struct builtin *const O_vec_vavgub[1] = { + &B_vec_vavgub, +}; +static const struct builtin *const O_vec_vavguh[1] = { + &B_vec_vavguh, +}; +static const struct builtin *const O_vec_vavguw[1] = { + &B_vec_vavguw, +}; +static const struct builtin *const O_vec_vcfsx[1] = { + &B_vec_vcfsx, +}; +static const struct builtin *const O_vec_vcfux[1] = { + &B_vec_vcfux, +}; +static const struct builtin *const O_vec_vcmpbfp[1] = { + &B_vec_vcmpbfp, +}; +static const struct builtin *const O_vec_vcmpeqfp[1] = { + &B_vec_vcmpeqfp, +}; +static const struct builtin *const O_vec_vcmpequb[2] = { + &B1_vec_vcmpequb, + &B2_vec_vcmpequb, +}; +static const struct builtin *const O_vec_vcmpequh[2] = { + &B1_vec_vcmpequh, + &B2_vec_vcmpequh, +}; +static const struct builtin *const O_vec_vcmpequw[2] = { + &B1_vec_vcmpequw, + &B2_vec_vcmpequw, +}; +static const struct builtin *const O_vec_vcmpgefp[1] = { + &B_vec_vcmpgefp, +}; +static const struct builtin *const O_vec_vcmpgtfp[1] = { + &B_vec_vcmpgtfp, +}; +static const struct builtin *const O_vec_vcmpgtsb[1] = { + &B_vec_vcmpgtsb, +}; +static const struct builtin *const O_vec_vcmpgtsh[1] = { + &B_vec_vcmpgtsh, +}; +static const struct builtin *const O_vec_vcmpgtsw[1] = { + &B_vec_vcmpgtsw, +}; +static const struct builtin *const O_vec_vcmpgtub[1] = { + &B_vec_vcmpgtub, +}; +static const struct builtin *const O_vec_vcmpgtuh[1] = { + &B_vec_vcmpgtuh, +}; +static const struct builtin *const O_vec_vcmpgtuw[1] = { + &B_vec_vcmpgtuw, +}; +static const struct builtin *const O_vec_vctsxs[1] = { + &B_vec_vctsxs, +}; +static const struct builtin *const O_vec_vctuxs[1] = { + &B_vec_vctuxs, +}; +static const struct builtin *const O_vec_vexptefp[1] = { + 
&B_vec_vexptefp, +}; +static const struct builtin *const O_vec_vlogefp[1] = { + &B_vec_vlogefp, +}; +static const struct builtin *const O_vec_vmaddfp[1] = { + &B_vec_vmaddfp, +}; +static const struct builtin *const O_vec_vmaxfp[1] = { + &B_vec_vmaxfp, +}; +static const struct builtin *const O_vec_vmaxsb[3] = { + &B1_vec_vmaxsb, + &B2_vec_vmaxsb, + &B3_vec_vmaxsb, +}; +static const struct builtin *const O_vec_vmaxsh[3] = { + &B1_vec_vmaxsh, + &B2_vec_vmaxsh, + &B3_vec_vmaxsh, +}; +static const struct builtin *const O_vec_vmaxsw[3] = { + &B1_vec_vmaxsw, + &B2_vec_vmaxsw, + &B3_vec_vmaxsw, +}; +static const struct builtin *const O_vec_vmaxub[3] = { + &B1_vec_vmaxub, + &B2_vec_vmaxub, + &B3_vec_vmaxub, +}; +static const struct builtin *const O_vec_vmaxuh[3] = { + &B1_vec_vmaxuh, + &B2_vec_vmaxuh, + &B3_vec_vmaxuh, +}; +static const struct builtin *const O_vec_vmaxuw[3] = { + &B1_vec_vmaxuw, + &B2_vec_vmaxuw, + &B3_vec_vmaxuw, +}; +static const struct builtin *const O_vec_vmhaddshs[1] = { + &B_vec_vmhaddshs, +}; +static const struct builtin *const O_vec_vmhraddshs[1] = { + &B_vec_vmhraddshs, +}; +static const struct builtin *const O_vec_vminfp[1] = { + &B_vec_vminfp, +}; +static const struct builtin *const O_vec_vminsb[3] = { + &B1_vec_vminsb, + &B2_vec_vminsb, + &B3_vec_vminsb, +}; +static const struct builtin *const O_vec_vminsh[3] = { + &B1_vec_vminsh, + &B2_vec_vminsh, + &B3_vec_vminsh, +}; +static const struct builtin *const O_vec_vminsw[3] = { + &B1_vec_vminsw, + &B2_vec_vminsw, + &B3_vec_vminsw, +}; +static const struct builtin *const O_vec_vminub[3] = { + &B1_vec_vminub, + &B2_vec_vminub, + &B3_vec_vminub, +}; +static const struct builtin *const O_vec_vminuh[3] = { + &B1_vec_vminuh, + &B2_vec_vminuh, + &B3_vec_vminuh, +}; +static const struct builtin *const O_vec_vminuw[3] = { + &B1_vec_vminuw, + &B2_vec_vminuw, + &B3_vec_vminuw, +}; +static const struct builtin *const O_vec_vmladduhm[4] = { + &B1_vec_vmladduhm, + &B2_vec_vmladduhm, + &B3_vec_vmladduhm, + 
&B4_vec_vmladduhm, +}; +static const struct builtin *const O_vec_vmrghb[3] = { + &B1_vec_vmrghb, + &B2_vec_vmrghb, + &B3_vec_vmrghb, +}; +static const struct builtin *const O_vec_vmrghh[4] = { + &B1_vec_vmrghh, + &B2_vec_vmrghh, + &B3_vec_vmrghh, + &B4_vec_vmrghh, +}; +static const struct builtin *const O_vec_vmrghw[4] = { + &B1_vec_vmrghw, + &B2_vec_vmrghw, + &B3_vec_vmrghw, + &B4_vec_vmrghw, +}; +static const struct builtin *const O_vec_vmrglb[3] = { + &B1_vec_vmrglb, + &B2_vec_vmrglb, + &B3_vec_vmrglb, +}; +static const struct builtin *const O_vec_vmrglh[4] = { + &B1_vec_vmrglh, + &B2_vec_vmrglh, + &B3_vec_vmrglh, + &B4_vec_vmrglh, +}; +static const struct builtin *const O_vec_vmrglw[4] = { + &B1_vec_vmrglw, + &B2_vec_vmrglw, + &B3_vec_vmrglw, + &B4_vec_vmrglw, +}; +static const struct builtin *const O_vec_vmsummbm[1] = { + &B_vec_vmsummbm, +}; +static const struct builtin *const O_vec_vmsumshm[1] = { + &B_vec_vmsumshm, +}; +static const struct builtin *const O_vec_vmsumshs[1] = { + &B_vec_vmsumshs, +}; +static const struct builtin *const O_vec_vmsumubm[1] = { + &B_vec_vmsumubm, +}; +static const struct builtin *const O_vec_vmsumuhm[1] = { + &B_vec_vmsumuhm, +}; +static const struct builtin *const O_vec_vmsumuhs[1] = { + &B_vec_vmsumuhs, +}; +static const struct builtin *const O_vec_vmulesb[1] = { + &B_vec_vmulesb, +}; +static const struct builtin *const O_vec_vmulesh[1] = { + &B_vec_vmulesh, +}; +static const struct builtin *const O_vec_vmuleub[1] = { + &B_vec_vmuleub, +}; +static const struct builtin *const O_vec_vmuleuh[1] = { + &B_vec_vmuleuh, +}; +static const struct builtin *const O_vec_vmulosb[1] = { + &B_vec_vmulosb, +}; +static const struct builtin *const O_vec_vmulosh[1] = { + &B_vec_vmulosh, +}; +static const struct builtin *const O_vec_vmuloub[1] = { + &B_vec_vmuloub, +}; +static const struct builtin *const O_vec_vmulouh[1] = { + &B_vec_vmulouh, +}; +static const struct builtin *const O_vec_vnmsubfp[1] = { + &B_vec_vnmsubfp, +}; +static const struct 
builtin *const O_vec_vnor[10] = { + &B1_vec_vnor, + &B2_vec_vnor, + &B3_vec_vnor, + &B4_vec_vnor, + &B5_vec_vnor, + &B6_vec_vnor, + &B7_vec_vnor, + &B8_vec_vnor, + &B9_vec_vnor, + &B10_vec_vnor, +}; +static const struct builtin *const O_vec_vor[24] = { + &B1_vec_vor, + &B2_vec_vor, + &B3_vec_vor, + &B4_vec_vor, + &B5_vec_vor, + &B6_vec_vor, + &B7_vec_vor, + &B8_vec_vor, + &B9_vec_vor, + &B10_vec_vor, + &B11_vec_vor, + &B12_vec_vor, + &B13_vec_vor, + &B14_vec_vor, + &B15_vec_vor, + &B16_vec_vor, + &B17_vec_vor, + &B18_vec_vor, + &B19_vec_vor, + &B20_vec_vor, + &B21_vec_vor, + &B22_vec_vor, + &B23_vec_vor, + &B24_vec_vor, +}; +static const struct builtin *const O_vec_vperm[11] = { + &B1_vec_vperm, + &B2_vec_vperm, + &B3_vec_vperm, + &B4_vec_vperm, + &B5_vec_vperm, + &B6_vec_vperm, + &B7_vec_vperm, + &B8_vec_vperm, + &B9_vec_vperm, + &B10_vec_vperm, + &B11_vec_vperm, +}; +static const struct builtin *const O_vec_vpkpx[1] = { + &B_vec_vpkpx, +}; +static const struct builtin *const O_vec_vpkshss[1] = { + &B_vec_vpkshss, +}; +static const struct builtin *const O_vec_vpkshus[1] = { + &B_vec_vpkshus, +}; +static const struct builtin *const O_vec_vpkswss[1] = { + &B_vec_vpkswss, +}; +static const struct builtin *const O_vec_vpkswus[1] = { + &B_vec_vpkswus, +}; +static const struct builtin *const O_vec_vpkuhum[3] = { + &B1_vec_vpkuhum, + &B2_vec_vpkuhum, + &B3_vec_vpkuhum, +}; +static const struct builtin *const O_vec_vpkuhus[1] = { + &B_vec_vpkuhus, +}; +static const struct builtin *const O_vec_vpkuwum[3] = { + &B1_vec_vpkuwum, + &B2_vec_vpkuwum, + &B3_vec_vpkuwum, +}; +static const struct builtin *const O_vec_vpkuwus[1] = { + &B_vec_vpkuwus, +}; +static const struct builtin *const O_vec_vrefp[1] = { + &B_vec_vrefp, +}; +static const struct builtin *const O_vec_vrfim[1] = { + &B_vec_vrfim, +}; +static const struct builtin *const O_vec_vrfin[1] = { + &B_vec_vrfin, +}; +static const struct builtin *const O_vec_vrfip[1] = { + &B_vec_vrfip, +}; +static const struct builtin 
*const O_vec_vrfiz[1] = { + &B_vec_vrfiz, +}; +static const struct builtin *const O_vec_vrlb[2] = { + &B1_vec_vrlb, + &B2_vec_vrlb, +}; +static const struct builtin *const O_vec_vrlh[2] = { + &B1_vec_vrlh, + &B2_vec_vrlh, +}; +static const struct builtin *const O_vec_vrlw[2] = { + &B1_vec_vrlw, + &B2_vec_vrlw, +}; +static const struct builtin *const O_vec_vrsqrtefp[1] = { + &B_vec_vrsqrtefp, +}; +static const struct builtin *const O_vec_vsel[20] = { + &B1_vec_vsel, + &B2_vec_vsel, + &B3_vec_vsel, + &B4_vec_vsel, + &B5_vec_vsel, + &B6_vec_vsel, + &B7_vec_vsel, + &B8_vec_vsel, + &B9_vec_vsel, + &B10_vec_vsel, + &B11_vec_vsel, + &B12_vec_vsel, + &B13_vec_vsel, + &B14_vec_vsel, + &B15_vec_vsel, + &B16_vec_vsel, + &B17_vec_vsel, + &B18_vec_vsel, + &B19_vec_vsel, + &B20_vec_vsel, +}; +static const struct builtin *const O_vec_vsl[30] = { + &B1_vec_vsl, + &B2_vec_vsl, + &B3_vec_vsl, + &B4_vec_vsl, + &B5_vec_vsl, + &B6_vec_vsl, + &B7_vec_vsl, + &B8_vec_vsl, + &B9_vec_vsl, + &B10_vec_vsl, + &B11_vec_vsl, + &B12_vec_vsl, + &B13_vec_vsl, + &B14_vec_vsl, + &B15_vec_vsl, + &B16_vec_vsl, + &B17_vec_vsl, + &B18_vec_vsl, + &B19_vec_vsl, + &B20_vec_vsl, + &B21_vec_vsl, + &B22_vec_vsl, + &B23_vec_vsl, + &B24_vec_vsl, + &B25_vec_vsl, + &B26_vec_vsl, + &B27_vec_vsl, + &B28_vec_vsl, + &B29_vec_vsl, + &B30_vec_vsl, +}; +static const struct builtin *const O_vec_vslb[2] = { + &B1_vec_vslb, + &B2_vec_vslb, +}; +static const struct builtin *const O_vec_vsldoi[11] = { + &B1_vec_vsldoi, + &B2_vec_vsldoi, + &B3_vec_vsldoi, + &B4_vec_vsldoi, + &B5_vec_vsldoi, + &B6_vec_vsldoi, + &B7_vec_vsldoi, + &B8_vec_vsldoi, + &B9_vec_vsldoi, + &B10_vec_vsldoi, + &B11_vec_vsldoi, +}; +static const struct builtin *const O_vec_vslh[2] = { + &B1_vec_vslh, + &B2_vec_vslh, +}; +static const struct builtin *const O_vec_vslo[16] = { + &B1_vec_vslo, + &B2_vec_vslo, + &B3_vec_vslo, + &B4_vec_vslo, + &B5_vec_vslo, + &B6_vec_vslo, + &B7_vec_vslo, + &B8_vec_vslo, + &B9_vec_vslo, + &B10_vec_vslo, + &B11_vec_vslo, + 
&B12_vec_vslo, + &B13_vec_vslo, + &B14_vec_vslo, + &B15_vec_vslo, + &B16_vec_vslo, +}; +static const struct builtin *const O_vec_vslw[2] = { + &B1_vec_vslw, + &B2_vec_vslw, +}; +static const struct builtin *const O_vec_vspltb[3] = { + &B1_vec_vspltb, + &B2_vec_vspltb, + &B3_vec_vspltb, +}; +static const struct builtin *const O_vec_vsplth[4] = { + &B1_vec_vsplth, + &B2_vec_vsplth, + &B3_vec_vsplth, + &B4_vec_vsplth, +}; +static const struct builtin *const O_vec_vspltisb[1] = { + &B_vec_vspltisb, +}; +static const struct builtin *const O_vec_vspltish[1] = { + &B_vec_vspltish, +}; +static const struct builtin *const O_vec_vspltisw[1] = { + &B_vec_vspltisw, +}; +static const struct builtin *const O_vec_vspltw[4] = { + &B1_vec_vspltw, + &B2_vec_vspltw, + &B3_vec_vspltw, + &B4_vec_vspltw, +}; +static const struct builtin *const O_vec_vsr[30] = { + &B1_vec_vsr, + &B2_vec_vsr, + &B3_vec_vsr, + &B4_vec_vsr, + &B5_vec_vsr, + &B6_vec_vsr, + &B7_vec_vsr, + &B8_vec_vsr, + &B9_vec_vsr, + &B10_vec_vsr, + &B11_vec_vsr, + &B12_vec_vsr, + &B13_vec_vsr, + &B14_vec_vsr, + &B15_vec_vsr, + &B16_vec_vsr, + &B17_vec_vsr, + &B18_vec_vsr, + &B19_vec_vsr, + &B20_vec_vsr, + &B21_vec_vsr, + &B22_vec_vsr, + &B23_vec_vsr, + &B24_vec_vsr, + &B25_vec_vsr, + &B26_vec_vsr, + &B27_vec_vsr, + &B28_vec_vsr, + &B29_vec_vsr, + &B30_vec_vsr, +}; +static const struct builtin *const O_vec_vsrab[2] = { + &B1_vec_vsrab, + &B2_vec_vsrab, +}; +static const struct builtin *const O_vec_vsrah[2] = { + &B1_vec_vsrah, + &B2_vec_vsrah, +}; +static const struct builtin *const O_vec_vsraw[2] = { + &B1_vec_vsraw, + &B2_vec_vsraw, +}; +static const struct builtin *const O_vec_vsrb[2] = { + &B1_vec_vsrb, + &B2_vec_vsrb, +}; +static const struct builtin *const O_vec_vsrh[2] = { + &B1_vec_vsrh, + &B2_vec_vsrh, +}; +static const struct builtin *const O_vec_vsro[16] = { + &B1_vec_vsro, + &B2_vec_vsro, + &B3_vec_vsro, + &B4_vec_vsro, + &B5_vec_vsro, + &B6_vec_vsro, + &B7_vec_vsro, + &B8_vec_vsro, + &B9_vec_vsro, + 
&B10_vec_vsro, + &B11_vec_vsro, + &B12_vec_vsro, + &B13_vec_vsro, + &B14_vec_vsro, + &B15_vec_vsro, + &B16_vec_vsro, +}; +static const struct builtin *const O_vec_vsrw[2] = { + &B1_vec_vsrw, + &B2_vec_vsrw, +}; +static const struct builtin *const O_vec_vsubcuw[1] = { + &B_vec_vsubcuw, +}; +static const struct builtin *const O_vec_vsubfp[1] = { + &B_vec_vsubfp, +}; +static const struct builtin *const O_vec_vsubsbs[3] = { + &B1_vec_vsubsbs, + &B2_vec_vsubsbs, + &B3_vec_vsubsbs, +}; +static const struct builtin *const O_vec_vsubshs[3] = { + &B1_vec_vsubshs, + &B2_vec_vsubshs, + &B3_vec_vsubshs, +}; +static const struct builtin *const O_vec_vsubsws[3] = { + &B1_vec_vsubsws, + &B2_vec_vsubsws, + &B3_vec_vsubsws, +}; +static const struct builtin *const O_vec_vsububm[6] = { + &B1_vec_vsububm, + &B2_vec_vsububm, + &B3_vec_vsububm, + &B4_vec_vsububm, + &B5_vec_vsububm, + &B6_vec_vsububm, +}; +static const struct builtin *const O_vec_vsububs[3] = { + &B1_vec_vsububs, + &B2_vec_vsububs, + &B3_vec_vsububs, +}; +static const struct builtin *const O_vec_vsubuhm[6] = { + &B1_vec_vsubuhm, + &B2_vec_vsubuhm, + &B3_vec_vsubuhm, + &B4_vec_vsubuhm, + &B5_vec_vsubuhm, + &B6_vec_vsubuhm, +}; +static const struct builtin *const O_vec_vsubuhs[3] = { + &B1_vec_vsubuhs, + &B2_vec_vsubuhs, + &B3_vec_vsubuhs, +}; +static const struct builtin *const O_vec_vsubuwm[6] = { + &B1_vec_vsubuwm, + &B2_vec_vsubuwm, + &B3_vec_vsubuwm, + &B4_vec_vsubuwm, + &B5_vec_vsubuwm, + &B6_vec_vsubuwm, +}; +static const struct builtin *const O_vec_vsubuws[3] = { + &B1_vec_vsubuws, + &B2_vec_vsubuws, + &B3_vec_vsubuws, +}; +static const struct builtin *const O_vec_vsum2sws[1] = { + &B_vec_vsum2sws, +}; +static const struct builtin *const O_vec_vsum4sbs[1] = { + &B_vec_vsum4sbs, +}; +static const struct builtin *const O_vec_vsum4shs[1] = { + &B_vec_vsum4shs, +}; +static const struct builtin *const O_vec_vsum4ubs[1] = { + &B_vec_vsum4ubs, +}; +static const struct builtin *const O_vec_vsumsws[1] = { + &B_vec_vsumsws, 
+}; +static const struct builtin *const O_vec_vupkhpx[1] = { + &B_vec_vupkhpx, +}; +static const struct builtin *const O_vec_vupkhsb[2] = { + &B1_vec_vupkhsb, + &B2_vec_vupkhsb, +}; +static const struct builtin *const O_vec_vupkhsh[2] = { + &B1_vec_vupkhsh, + &B2_vec_vupkhsh, +}; +static const struct builtin *const O_vec_vupklpx[1] = { + &B_vec_vupklpx, +}; +static const struct builtin *const O_vec_vupklsb[2] = { + &B1_vec_vupklsb, + &B2_vec_vupklsb, +}; +static const struct builtin *const O_vec_vupklsh[2] = { + &B1_vec_vupklsh, + &B2_vec_vupklsh, +}; +static const struct builtin *const O_vec_vxor[24] = { + &B1_vec_vxor, + &B2_vec_vxor, + &B3_vec_vxor, + &B4_vec_vxor, + &B5_vec_vxor, + &B6_vec_vxor, + &B7_vec_vxor, + &B8_vec_vxor, + &B9_vec_vxor, + &B10_vec_vxor, + &B11_vec_vxor, + &B12_vec_vxor, + &B13_vec_vxor, + &B14_vec_vxor, + &B15_vec_vxor, + &B16_vec_vxor, + &B17_vec_vxor, + &B18_vec_vxor, + &B19_vec_vxor, + &B20_vec_vxor, + &B21_vec_vxor, + &B22_vec_vxor, + &B23_vec_vxor, + &B24_vec_vxor, +}; +static const struct builtin *const O_vec_xor[24] = { + &B1_vec_vxor, + &B2_vec_vxor, + &B3_vec_vxor, + &B4_vec_vxor, + &B5_vec_vxor, + &B6_vec_vxor, + &B7_vec_vxor, + &B8_vec_vxor, + &B9_vec_vxor, + &B10_vec_vxor, + &B11_vec_vxor, + &B12_vec_vxor, + &B13_vec_vxor, + &B14_vec_vxor, + &B15_vec_vxor, + &B16_vec_vxor, + &B17_vec_vxor, + &B18_vec_vxor, + &B19_vec_vxor, + &B20_vec_vxor, + &B21_vec_vxor, + &B22_vec_vxor, + &B23_vec_vxor, + &B24_vec_vxor, +}; + +const struct overloadx Overload[] = { + { "vec_abs", 4, 1, O_vec_abs, O_UID(0) }, + { "vec_abss", 3, 1, O_vec_abss, O_UID(1) }, + { "vec_add", 19, 2, O_vec_add, O_UID(2) }, + { "vec_addc", 1, 2, O_vec_addc, O_UID(3) }, + { "vec_adds", 18, 2, O_vec_adds, O_UID(4) }, + { "vec_all_eq", 23, 2, O_vec_all_eq, O_UID(5) }, + { "vec_all_ge", 19, 2, O_vec_all_ge, O_UID(6) }, + { "vec_all_gt", 19, 2, O_vec_all_gt, O_UID(7) }, + { "vec_all_in", 1, 2, O_vec_all_in, O_UID(8) }, + { "vec_all_le", 19, 2, O_vec_all_le, O_UID(9) }, + { 
"vec_all_lt", 19, 2, O_vec_all_lt, O_UID(10) }, + { "vec_all_nan", 1, 1, O_vec_all_nan, O_UID(11) }, + { "vec_all_ne", 23, 2, O_vec_all_ne, O_UID(12) }, + { "vec_all_nge", 1, 2, O_vec_all_nge, O_UID(13) }, + { "vec_all_ngt", 1, 2, O_vec_all_ngt, O_UID(14) }, + { "vec_all_nle", 1, 2, O_vec_all_nle, O_UID(15) }, + { "vec_all_nlt", 1, 2, O_vec_all_nlt, O_UID(16) }, + { "vec_all_numeric", 1, 1, O_vec_all_numeric, O_UID(17) }, + { "vec_and", 24, 2, O_vec_and, O_UID(18) }, + { "vec_andc", 24, 2, O_vec_andc, O_UID(19) }, + { "vec_any_eq", 23, 2, O_vec_any_eq, O_UID(20) }, + { "vec_any_ge", 19, 2, O_vec_any_ge, O_UID(21) }, + { "vec_any_gt", 19, 2, O_vec_any_gt, O_UID(22) }, + { "vec_any_le", 19, 2, O_vec_any_le, O_UID(23) }, + { "vec_any_lt", 19, 2, O_vec_any_lt, O_UID(24) }, + { "vec_any_nan", 1, 1, O_vec_any_nan, O_UID(25) }, + { "vec_any_ne", 23, 2, O_vec_any_ne, O_UID(26) }, + { "vec_any_nge", 1, 2, O_vec_any_nge, O_UID(27) }, + { "vec_any_ngt", 1, 2, O_vec_any_ngt, O_UID(28) }, + { "vec_any_nle", 1, 2, O_vec_any_nle, O_UID(29) }, + { "vec_any_nlt", 1, 2, O_vec_any_nlt, O_UID(30) }, + { "vec_any_numeric", 1, 1, O_vec_any_numeric, O_UID(31) }, + { "vec_any_out", 1, 2, O_vec_any_out, O_UID(32) }, + { "vec_avg", 6, 2, O_vec_avg, O_UID(33) }, + { "vec_ceil", 1, 1, O_vec_ceil, O_UID(34) }, + { "vec_cmpb", 1, 2, O_vec_cmpb, O_UID(35) }, + { "vec_cmpeq", 7, 2, O_vec_cmpeq, O_UID(36) }, + { "vec_cmpge", 1, 2, O_vec_cmpge, O_UID(37) }, + { "vec_cmpgt", 7, 2, O_vec_cmpgt, O_UID(38) }, + { "vec_cmple", 1, 2, O_vec_cmple, O_UID(39) }, + { "vec_cmplt", 7, 2, O_vec_cmplt, O_UID(40) }, + { "vec_ctf", 2, 2, O_vec_ctf, O_UID(41) }, + { "vec_cts", 1, 2, O_vec_cts, O_UID(42) }, + { "vec_ctu", 1, 2, O_vec_ctu, O_UID(43) }, + { "vec_dss", 1, 1, O_vec_dss, O_UID(44) }, + { "vec_dssall", 1, 0, O_vec_dssall, O_UID(45) }, + { "vec_dst", 20, 3, O_vec_dst, O_UID(46) }, + { "vec_dstst", 20, 3, O_vec_dstst, O_UID(47) }, + { "vec_dststt", 20, 3, O_vec_dststt, O_UID(48) }, + { "vec_dstt", 20, 3, 
O_vec_dstt, O_UID(49) }, + { "vec_expte", 1, 1, O_vec_expte, O_UID(50) }, + { "vec_floor", 1, 1, O_vec_floor, O_UID(51) }, + { "vec_ld", 20, 2, O_vec_ld, O_UID(52) }, + { "vec_lde", 9, 2, O_vec_lde, O_UID(53) }, + { "vec_ldl", 20, 2, O_vec_ldl, O_UID(54) }, + { "vec_loge", 1, 1, O_vec_loge, O_UID(55) }, + { "vec_lvebx", 2, 2, O_vec_lvebx, O_UID(56) }, + { "vec_lvehx", 2, 2, O_vec_lvehx, O_UID(57) }, + { "vec_lvewx", 5, 2, O_vec_lvewx, O_UID(58) }, + { "vec_lvsl", 9, 2, O_vec_lvsl, O_UID(59) }, + { "vec_lvsr", 9, 2, O_vec_lvsr, O_UID(60) }, + { "vec_lvx", 20, 2, O_vec_lvx, O_UID(61) }, + { "vec_lvxl", 20, 2, O_vec_lvxl, O_UID(62) }, + { "vec_madd", 1, 3, O_vec_madd, O_UID(63) }, + { "vec_madds", 1, 3, O_vec_madds, O_UID(64) }, + { "vec_max", 19, 2, O_vec_max, O_UID(65) }, + { "vec_mergeh", 11, 2, O_vec_mergeh, O_UID(66) }, + { "vec_mergel", 11, 2, O_vec_mergel, O_UID(67) }, + { "vec_mfvscr", 1, 0, O_vec_mfvscr, O_UID(68) }, + { "vec_min", 19, 2, O_vec_min, O_UID(69) }, + { "vec_mladd", 4, 3, O_vec_mladd, O_UID(70) }, + { "vec_mradds", 1, 3, O_vec_mradds, O_UID(71) }, + { "vec_msum", 4, 3, O_vec_msum, O_UID(72) }, + { "vec_msums", 2, 3, O_vec_msums, O_UID(73) }, + { "vec_mtvscr", 10, 1, O_vec_mtvscr, O_UID(74) }, + { "vec_mule", 4, 2, O_vec_mule, O_UID(75) }, + { "vec_mulo", 4, 2, O_vec_mulo, O_UID(76) }, + { "vec_nmsub", 1, 3, O_vec_nmsub, O_UID(77) }, + { "vec_nor", 10, 2, O_vec_nor, O_UID(78) }, + { "vec_or", 24, 2, O_vec_or, O_UID(79) }, + { "vec_pack", 6, 2, O_vec_pack, O_UID(80) }, + { "vec_packpx", 1, 2, O_vec_packpx, O_UID(81) }, + { "vec_packs", 4, 2, O_vec_packs, O_UID(82) }, + { "vec_packsu", 4, 2, O_vec_packsu, O_UID(83) }, + { "vec_perm", 11, 3, O_vec_perm, O_UID(84) }, + { "vec_re", 1, 1, O_vec_re, O_UID(85) }, + { "vec_rl", 6, 2, O_vec_rl, O_UID(86) }, + { "vec_round", 1, 1, O_vec_round, O_UID(87) }, + { "vec_rsqrte", 1, 1, O_vec_rsqrte, O_UID(88) }, + { "vec_sel", 20, 3, O_vec_sel, O_UID(89) }, + { "vec_sl", 6, 2, O_vec_sl, O_UID(90) }, + { "vec_sld", 
11, 3, O_vec_sld, O_UID(91) }, + { "vec_sll", 30, 2, O_vec_sll, O_UID(92) }, + { "vec_slo", 16, 2, O_vec_slo, O_UID(93) }, + { "vec_splat", 11, 2, O_vec_splat, O_UID(94) }, + { "vec_splat_s16", 1, 1, O_vec_splat_s16, O_UID(95) }, + { "vec_splat_s32", 1, 1, O_vec_splat_s32, O_UID(96) }, + { "vec_splat_s8", 1, 1, O_vec_splat_s8, O_UID(97) }, + { "vec_splat_u16", 1, 1, O_vec_splat_u16, O_UID(98) }, + { "vec_splat_u32", 1, 1, O_vec_splat_u32, O_UID(99) }, + { "vec_splat_u8", 1, 1, O_vec_splat_u8, O_UID(100) }, + { "vec_sr", 6, 2, O_vec_sr, O_UID(101) }, + { "vec_sra", 6, 2, O_vec_sra, O_UID(102) }, + { "vec_srl", 30, 2, O_vec_srl, O_UID(103) }, + { "vec_sro", 16, 2, O_vec_sro, O_UID(104) }, + { "vec_st", 30, 3, O_vec_st, O_UID(105) }, + { "vec_ste", 19, 3, O_vec_ste, O_UID(106) }, + { "vec_stl", 30, 3, O_vec_stl, O_UID(107) }, + { "vec_stvebx", 6, 3, O_vec_stvebx, O_UID(108) }, + { "vec_stvehx", 4, 3, O_vec_stvehx, O_UID(109) }, + { "vec_stvewx", 9, 3, O_vec_stvewx, O_UID(110) }, + { "vec_stvx", 30, 3, O_vec_stvx, O_UID(111) }, + { "vec_stvxl", 30, 3, O_vec_stvxl, O_UID(112) }, + { "vec_sub", 19, 2, O_vec_sub, O_UID(113) }, + { "vec_subc", 1, 2, O_vec_subc, O_UID(114) }, + { "vec_subs", 18, 2, O_vec_subs, O_UID(115) }, + { "vec_sum2s", 1, 2, O_vec_sum2s, O_UID(116) }, + { "vec_sum4s", 3, 2, O_vec_sum4s, O_UID(117) }, + { "vec_sums", 1, 2, O_vec_sums, O_UID(118) }, + { "vec_trunc", 1, 1, O_vec_trunc, O_UID(119) }, + { "vec_unpack2sh", 2, 2, O_vec_unpack2sh, O_UID(120) }, + { "vec_unpack2sl", 2, 2, O_vec_unpack2sl, O_UID(121) }, + { "vec_unpack2uh", 2, 2, O_vec_unpack2uh, O_UID(122) }, + { "vec_unpack2ul", 2, 2, O_vec_unpack2ul, O_UID(123) }, + { "vec_unpackh", 5, 1, O_vec_unpackh, O_UID(124) }, + { "vec_unpackl", 5, 1, O_vec_unpackl, O_UID(125) }, + { "vec_vaddcuw", 1, 2, O_vec_vaddcuw, O_UID(126) }, + { "vec_vaddfp", 1, 2, O_vec_vaddfp, O_UID(127) }, + { "vec_vaddsbs", 3, 2, O_vec_vaddsbs, O_UID(128) }, + { "vec_vaddshs", 3, 2, O_vec_vaddshs, O_UID(129) }, + { 
"vec_vaddsws", 3, 2, O_vec_vaddsws, O_UID(130) }, + { "vec_vaddubm", 6, 2, O_vec_vaddubm, O_UID(131) }, + { "vec_vaddubs", 3, 2, O_vec_vaddubs, O_UID(132) }, + { "vec_vadduhm", 6, 2, O_vec_vadduhm, O_UID(133) }, + { "vec_vadduhs", 3, 2, O_vec_vadduhs, O_UID(134) }, + { "vec_vadduwm", 6, 2, O_vec_vadduwm, O_UID(135) }, + { "vec_vadduws", 3, 2, O_vec_vadduws, O_UID(136) }, + { "vec_vand", 24, 2, O_vec_vand, O_UID(137) }, + { "vec_vandc", 24, 2, O_vec_vandc, O_UID(138) }, + { "vec_vavgsb", 1, 2, O_vec_vavgsb, O_UID(139) }, + { "vec_vavgsh", 1, 2, O_vec_vavgsh, O_UID(140) }, + { "vec_vavgsw", 1, 2, O_vec_vavgsw, O_UID(141) }, + { "vec_vavgub", 1, 2, O_vec_vavgub, O_UID(142) }, + { "vec_vavguh", 1, 2, O_vec_vavguh, O_UID(143) }, + { "vec_vavguw", 1, 2, O_vec_vavguw, O_UID(144) }, + { "vec_vcfsx", 1, 2, O_vec_vcfsx, O_UID(145) }, + { "vec_vcfux", 1, 2, O_vec_vcfux, O_UID(146) }, + { "vec_vcmpbfp", 1, 2, O_vec_vcmpbfp, O_UID(147) }, + { "vec_vcmpeqfp", 1, 2, O_vec_vcmpeqfp, O_UID(148) }, + { "vec_vcmpequb", 2, 2, O_vec_vcmpequb, O_UID(149) }, + { "vec_vcmpequh", 2, 2, O_vec_vcmpequh, O_UID(150) }, + { "vec_vcmpequw", 2, 2, O_vec_vcmpequw, O_UID(151) }, + { "vec_vcmpgefp", 1, 2, O_vec_vcmpgefp, O_UID(152) }, + { "vec_vcmpgtfp", 1, 2, O_vec_vcmpgtfp, O_UID(153) }, + { "vec_vcmpgtsb", 1, 2, O_vec_vcmpgtsb, O_UID(154) }, + { "vec_vcmpgtsh", 1, 2, O_vec_vcmpgtsh, O_UID(155) }, + { "vec_vcmpgtsw", 1, 2, O_vec_vcmpgtsw, O_UID(156) }, + { "vec_vcmpgtub", 1, 2, O_vec_vcmpgtub, O_UID(157) }, + { "vec_vcmpgtuh", 1, 2, O_vec_vcmpgtuh, O_UID(158) }, + { "vec_vcmpgtuw", 1, 2, O_vec_vcmpgtuw, O_UID(159) }, + { "vec_vctsxs", 1, 2, O_vec_vctsxs, O_UID(160) }, + { "vec_vctuxs", 1, 2, O_vec_vctuxs, O_UID(161) }, + { "vec_vexptefp", 1, 1, O_vec_vexptefp, O_UID(162) }, + { "vec_vlogefp", 1, 1, O_vec_vlogefp, O_UID(163) }, + { "vec_vmaddfp", 1, 3, O_vec_vmaddfp, O_UID(164) }, + { "vec_vmaxfp", 1, 2, O_vec_vmaxfp, O_UID(165) }, + { "vec_vmaxsb", 3, 2, O_vec_vmaxsb, O_UID(166) }, + { 
"vec_vmaxsh", 3, 2, O_vec_vmaxsh, O_UID(167) }, + { "vec_vmaxsw", 3, 2, O_vec_vmaxsw, O_UID(168) }, + { "vec_vmaxub", 3, 2, O_vec_vmaxub, O_UID(169) }, + { "vec_vmaxuh", 3, 2, O_vec_vmaxuh, O_UID(170) }, + { "vec_vmaxuw", 3, 2, O_vec_vmaxuw, O_UID(171) }, + { "vec_vmhaddshs", 1, 3, O_vec_vmhaddshs, O_UID(172) }, + { "vec_vmhraddshs", 1, 3, O_vec_vmhraddshs, O_UID(173) }, + { "vec_vminfp", 1, 2, O_vec_vminfp, O_UID(174) }, + { "vec_vminsb", 3, 2, O_vec_vminsb, O_UID(175) }, + { "vec_vminsh", 3, 2, O_vec_vminsh, O_UID(176) }, + { "vec_vminsw", 3, 2, O_vec_vminsw, O_UID(177) }, + { "vec_vminub", 3, 2, O_vec_vminub, O_UID(178) }, + { "vec_vminuh", 3, 2, O_vec_vminuh, O_UID(179) }, + { "vec_vminuw", 3, 2, O_vec_vminuw, O_UID(180) }, + { "vec_vmladduhm", 4, 3, O_vec_vmladduhm, O_UID(181) }, + { "vec_vmrghb", 3, 2, O_vec_vmrghb, O_UID(182) }, + { "vec_vmrghh", 4, 2, O_vec_vmrghh, O_UID(183) }, + { "vec_vmrghw", 4, 2, O_vec_vmrghw, O_UID(184) }, + { "vec_vmrglb", 3, 2, O_vec_vmrglb, O_UID(185) }, + { "vec_vmrglh", 4, 2, O_vec_vmrglh, O_UID(186) }, + { "vec_vmrglw", 4, 2, O_vec_vmrglw, O_UID(187) }, + { "vec_vmsummbm", 1, 3, O_vec_vmsummbm, O_UID(188) }, + { "vec_vmsumshm", 1, 3, O_vec_vmsumshm, O_UID(189) }, + { "vec_vmsumshs", 1, 3, O_vec_vmsumshs, O_UID(190) }, + { "vec_vmsumubm", 1, 3, O_vec_vmsumubm, O_UID(191) }, + { "vec_vmsumuhm", 1, 3, O_vec_vmsumuhm, O_UID(192) }, + { "vec_vmsumuhs", 1, 3, O_vec_vmsumuhs, O_UID(193) }, + { "vec_vmulesb", 1, 2, O_vec_vmulesb, O_UID(194) }, + { "vec_vmulesh", 1, 2, O_vec_vmulesh, O_UID(195) }, + { "vec_vmuleub", 1, 2, O_vec_vmuleub, O_UID(196) }, + { "vec_vmuleuh", 1, 2, O_vec_vmuleuh, O_UID(197) }, + { "vec_vmulosb", 1, 2, O_vec_vmulosb, O_UID(198) }, + { "vec_vmulosh", 1, 2, O_vec_vmulosh, O_UID(199) }, + { "vec_vmuloub", 1, 2, O_vec_vmuloub, O_UID(200) }, + { "vec_vmulouh", 1, 2, O_vec_vmulouh, O_UID(201) }, + { "vec_vnmsubfp", 1, 3, O_vec_vnmsubfp, O_UID(202) }, + { "vec_vnor", 10, 2, O_vec_vnor, O_UID(203) }, + { "vec_vor", 24, 
2, O_vec_vor, O_UID(204) }, + { "vec_vperm", 11, 3, O_vec_vperm, O_UID(205) }, + { "vec_vpkpx", 1, 2, O_vec_vpkpx, O_UID(206) }, + { "vec_vpkshss", 1, 2, O_vec_vpkshss, O_UID(207) }, + { "vec_vpkshus", 1, 2, O_vec_vpkshus, O_UID(208) }, + { "vec_vpkswss", 1, 2, O_vec_vpkswss, O_UID(209) }, + { "vec_vpkswus", 1, 2, O_vec_vpkswus, O_UID(210) }, + { "vec_vpkuhum", 3, 2, O_vec_vpkuhum, O_UID(211) }, + { "vec_vpkuhus", 1, 2, O_vec_vpkuhus, O_UID(212) }, + { "vec_vpkuwum", 3, 2, O_vec_vpkuwum, O_UID(213) }, + { "vec_vpkuwus", 1, 2, O_vec_vpkuwus, O_UID(214) }, + { "vec_vrefp", 1, 1, O_vec_vrefp, O_UID(215) }, + { "vec_vrfim", 1, 1, O_vec_vrfim, O_UID(216) }, + { "vec_vrfin", 1, 1, O_vec_vrfin, O_UID(217) }, + { "vec_vrfip", 1, 1, O_vec_vrfip, O_UID(218) }, + { "vec_vrfiz", 1, 1, O_vec_vrfiz, O_UID(219) }, + { "vec_vrlb", 2, 2, O_vec_vrlb, O_UID(220) }, + { "vec_vrlh", 2, 2, O_vec_vrlh, O_UID(221) }, + { "vec_vrlw", 2, 2, O_vec_vrlw, O_UID(222) }, + { "vec_vrsqrtefp", 1, 1, O_vec_vrsqrtefp, O_UID(223) }, + { "vec_vsel", 20, 3, O_vec_vsel, O_UID(224) }, + { "vec_vsl", 30, 2, O_vec_vsl, O_UID(225) }, + { "vec_vslb", 2, 2, O_vec_vslb, O_UID(226) }, + { "vec_vsldoi", 11, 3, O_vec_vsldoi, O_UID(227) }, + { "vec_vslh", 2, 2, O_vec_vslh, O_UID(228) }, + { "vec_vslo", 16, 2, O_vec_vslo, O_UID(229) }, + { "vec_vslw", 2, 2, O_vec_vslw, O_UID(230) }, + { "vec_vspltb", 3, 2, O_vec_vspltb, O_UID(231) }, + { "vec_vsplth", 4, 2, O_vec_vsplth, O_UID(232) }, + { "vec_vspltisb", 1, 1, O_vec_vspltisb, O_UID(233) }, + { "vec_vspltish", 1, 1, O_vec_vspltish, O_UID(234) }, + { "vec_vspltisw", 1, 1, O_vec_vspltisw, O_UID(235) }, + { "vec_vspltw", 4, 2, O_vec_vspltw, O_UID(236) }, + { "vec_vsr", 30, 2, O_vec_vsr, O_UID(237) }, + { "vec_vsrab", 2, 2, O_vec_vsrab, O_UID(238) }, + { "vec_vsrah", 2, 2, O_vec_vsrah, O_UID(239) }, + { "vec_vsraw", 2, 2, O_vec_vsraw, O_UID(240) }, + { "vec_vsrb", 2, 2, O_vec_vsrb, O_UID(241) }, + { "vec_vsrh", 2, 2, O_vec_vsrh, O_UID(242) }, + { "vec_vsro", 16, 2, 
O_vec_vsro, O_UID(243) }, + { "vec_vsrw", 2, 2, O_vec_vsrw, O_UID(244) }, + { "vec_vsubcuw", 1, 2, O_vec_vsubcuw, O_UID(245) }, + { "vec_vsubfp", 1, 2, O_vec_vsubfp, O_UID(246) }, + { "vec_vsubsbs", 3, 2, O_vec_vsubsbs, O_UID(247) }, + { "vec_vsubshs", 3, 2, O_vec_vsubshs, O_UID(248) }, + { "vec_vsubsws", 3, 2, O_vec_vsubsws, O_UID(249) }, + { "vec_vsububm", 6, 2, O_vec_vsububm, O_UID(250) }, + { "vec_vsububs", 3, 2, O_vec_vsububs, O_UID(251) }, + { "vec_vsubuhm", 6, 2, O_vec_vsubuhm, O_UID(252) }, + { "vec_vsubuhs", 3, 2, O_vec_vsubuhs, O_UID(253) }, + { "vec_vsubuwm", 6, 2, O_vec_vsubuwm, O_UID(254) }, + { "vec_vsubuws", 3, 2, O_vec_vsubuws, O_UID(255) }, + { "vec_vsum2sws", 1, 2, O_vec_vsum2sws, O_UID(256) }, + { "vec_vsum4sbs", 1, 2, O_vec_vsum4sbs, O_UID(257) }, + { "vec_vsum4shs", 1, 2, O_vec_vsum4shs, O_UID(258) }, + { "vec_vsum4ubs", 1, 2, O_vec_vsum4ubs, O_UID(259) }, + { "vec_vsumsws", 1, 2, O_vec_vsumsws, O_UID(260) }, + { "vec_vupkhpx", 1, 1, O_vec_vupkhpx, O_UID(261) }, + { "vec_vupkhsb", 2, 1, O_vec_vupkhsb, O_UID(262) }, + { "vec_vupkhsh", 2, 1, O_vec_vupkhsh, O_UID(263) }, + { "vec_vupklpx", 1, 1, O_vec_vupklpx, O_UID(264) }, + { "vec_vupklsb", 2, 1, O_vec_vupklsb, O_UID(265) }, + { "vec_vupklsh", 2, 1, O_vec_vupklsh, O_UID(266) }, + { "vec_vxor", 24, 2, O_vec_vxor, O_UID(267) }, + { "vec_xor", 24, 2, O_vec_xor, O_UID(268) }, + { NULL, 0, 0, NULL, 0 } +}; +#define LAST_O_UID O_UID(269) diff --git a/gcc/config/rs6000/vec.ops b/gcc/config/rs6000/vec.ops new file mode 100644 index 00000000000..5ef80a2d6b8 --- /dev/null +++ b/gcc/config/rs6000/vec.ops @@ -0,0 +1,1025 @@ +# APPLE LOCAL file AltiVec +# ops-to-gp -gcc vec.ops builtin.ops +vec_abs vec_s8 = vec_s8 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 1 FALSE FALSE transform_vec_abs +vec_abs vec_s16 = vec_s16 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 2 FALSE FALSE transform_vec_abs +vec_abs vec_s32 = vec_s32 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 3 FALSE FALSE transform_vec_abs +vec_abs 
vec_f32 = vec_f32 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 4 FALSE FALSE transform_vec_abs +vec_abss vec_s8 = vec_s8 vec_abss BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 5 FALSE FALSE transform_vec_abs +vec_abss vec_s16 = vec_s16 vec_abss BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 6 FALSE FALSE transform_vec_abs +vec_abss vec_s32 = vec_s32 vec_abss BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 7 FALSE FALSE transform_vec_abs +vec_cmplt vec_u8 vec_u8 = vec_b8 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtub FALSE FALSE transform_vec_cmp_reverse +vec_cmplt vec_u16 vec_u16 = vec_b16 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuh FALSE FALSE transform_vec_cmp_reverse +vec_cmplt vec_u32 vec_u32 = vec_b32 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuw FALSE FALSE transform_vec_cmp_reverse +vec_cmplt vec_s8 vec_s8 = vec_b8 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsb FALSE FALSE transform_vec_cmp_reverse +vec_cmplt vec_s16 vec_s16 = vec_b16 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsh FALSE FALSE transform_vec_cmp_reverse +vec_cmplt vec_s32 vec_s32 = vec_b32 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsw FALSE FALSE transform_vec_cmp_reverse +vec_cmplt vec_f32 vec_f32 = vec_b32 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfp FALSE FALSE transform_vec_cmp_reverse +vec_cmple vec_f32 vec_f32 = vec_b32 vec_cmple BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefp FALSE FALSE transform_vec_cmp_reverse +vec_add vec_s8 vec_s8 = vec_s8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_s8 vec_b8 = vec_s8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_b8 vec_s8 = vec_s8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_s16 vec_s16 = vec_s16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_s16 vec_b16 = vec_s16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_b16 vec_s16 = vec_s16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_s32 vec_s32 = vec_s32 vec_vadduwm 
BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_s32 vec_b32 = vec_s32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_b32 vec_s32 = vec_s32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_u8 vec_u8 = vec_u8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_u8 vec_b8 = vec_u8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_b8 vec_u8 = vec_u8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_u16 vec_u16 = vec_u16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_u16 vec_b16 = vec_u16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_b16 vec_u16 = vec_u16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_u32 vec_u32 = vec_u32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_u32 vec_b32 = vec_u32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_b32 vec_u32 = vec_u32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_s8 vec_s8 = vec_s8 vec_vaddsbs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_s8 vec_b8 = vec_s8 vec_vaddsbs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_b8 vec_s8 = vec_s8 vec_vaddsbs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_s16 vec_s16 = vec_s16 vec_vaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_s16 vec_b16 = vec_s16 vec_vaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_b16 vec_s16 = vec_s16 vec_vaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_s32 vec_s32 = vec_s32 vec_vaddsws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_s32 vec_b32 = vec_s32 vec_vaddsws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_b32 vec_s32 = vec_s32 vec_vaddsws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_u8 vec_u8 = vec_u8 vec_vaddubs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_u8 vec_b8 = vec_u8 vec_vaddubs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_b8 vec_u8 = vec_u8 vec_vaddubs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_u16 vec_u16 = vec_u16 vec_vadduhs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_u16 vec_b16 = vec_u16 vec_vadduhs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_b16 vec_u16 = 
vec_u16 vec_vadduhs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_u32 vec_u32 = vec_u32 vec_vadduws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_u32 vec_b32 = vec_u32 vec_vadduws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_adds vec_b32 vec_u32 = vec_u32 vec_vadduws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_s8 vec_s8 = vec_s8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_s8 vec_b8 = vec_s8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_b8 vec_s8 = vec_s8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_s16 vec_s16 = vec_s16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_s16 vec_b16 = vec_s16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_b16 vec_s16 = vec_s16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_s32 vec_s32 = vec_s32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_s32 vec_b32 = vec_s32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_b32 vec_s32 = vec_s32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_u8 vec_u8 = vec_u8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_u8 vec_b8 = vec_u8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_b8 vec_u8 = vec_u8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_u16 vec_u16 = vec_u16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_u16 vec_b16 = vec_u16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_b16 vec_u16 = vec_u16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_u32 vec_u32 = vec_u32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_u32 vec_b32 = vec_u32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_b32 vec_u32 = vec_u32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_s8 vec_s8 = vec_s8 vec_vsubsbs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_s8 vec_b8 = vec_s8 vec_vsubsbs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_b8 vec_s8 = vec_s8 vec_vsubsbs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_s16 vec_s16 = vec_s16 vec_vsubshs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_s16 
vec_b16 = vec_s16 vec_vsubshs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_b16 vec_s16 = vec_s16 vec_vsubshs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_s32 vec_s32 = vec_s32 vec_vsubsws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_s32 vec_b32 = vec_s32 vec_vsubsws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_b32 vec_s32 = vec_s32 vec_vsubsws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_u8 vec_u8 = vec_u8 vec_vsububs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_u8 vec_b8 = vec_u8 vec_vsububs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_b8 vec_u8 = vec_u8 vec_vsububs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_u16 vec_u16 = vec_u16 vec_vsubuhs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_u16 vec_b16 = vec_u16 vec_vsubuhs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_b16 vec_u16 = vec_u16 vec_vsubuhs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_u32 vec_u32 = vec_u32 vec_vsubuws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_u32 vec_b32 = vec_u32 vec_vsubuws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subs vec_b32 vec_u32 = vec_u32 vec_vsubuws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_addc vec_u32 vec_u32 = vec_u32 vec_vaddcuw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_subc vec_u32 vec_u32 = vec_u32 vec_vsubcuw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mulo vec_u8 vec_u8 = vec_u16 vec_vmuloub BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mulo vec_u16 vec_u16 = vec_u32 vec_vmulouh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mulo vec_s8 vec_s8 = vec_s16 vec_vmulosb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mulo vec_s16 vec_s16 = vec_s32 vec_vmulosh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mule vec_u8 vec_u8 = vec_u16 vec_vmuleub BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mule vec_u16 vec_u16 = vec_u32 vec_vmuleuh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mule vec_s8 vec_s8 = vec_s16 vec_vmulesb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mule vec_s16 vec_s16 = vec_s32 vec_vmulesh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mladd vec_s16 vec_s16 vec_s16 = vec_s16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mladd vec_u16 vec_u16 vec_u16 = vec_u16 
vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mladd vec_s16 vec_u16 vec_u16 = vec_s16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mladd vec_u16 vec_s16 vec_s16 = vec_s16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_madds vec_s16 vec_s16 vec_s16 = vec_s16 vec_vmhaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mradds vec_s16 vec_s16 vec_s16 = vec_s16 vec_vmhraddshs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_msum vec_s8 vec_u8 vec_s32 = vec_s32 vec_vmsummbm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_msum vec_u8 vec_u8 vec_u32 = vec_u32 vec_vmsumubm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_msum vec_s16 vec_s16 vec_s32 = vec_s32 vec_vmsumshm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_msum vec_u16 vec_u16 vec_u32 = vec_u32 vec_vmsumuhm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_msums vec_s16 vec_s16 vec_s32 = vec_s32 vec_vmsumshs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_msums vec_u16 vec_u16 vec_u32 = vec_u32 vec_vmsumuhs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sums vec_s32 vec_s32 = vec_s32 vec_vsumsws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sum2s vec_s32 vec_s32 = vec_s32 vec_vsum2sws BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sum4s vec_s8 vec_s32 = vec_s32 vec_vsum4sbs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sum4s vec_u8 vec_u32 = vec_u32 vec_vsum4ubs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sum4s vec_s16 vec_s32 = vec_s32 vec_vsum4shs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_avg vec_s8 vec_s8 = vec_s8 vec_vavgsb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_avg vec_s16 vec_s16 = vec_s16 vec_vavgsh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_avg vec_u8 vec_u8 = vec_u8 vec_vavgub BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_avg vec_u16 vec_u16 = vec_u16 vec_vavguh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_avg vec_s32 vec_s32 = vec_s32 vec_vavgsw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_avg vec_u32 vec_u32 = vec_u32 vec_vavguw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_s8 vec_s8 = vec_s8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_s8 vec_b8 = vec_s8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_b8 vec_s8 = vec_s8 vec_vand BUILTIN_AFTER_TRAVERSE 
CFG_VEC +vec_and vec_u8 vec_u8 = vec_u8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_b8 vec_u8 = vec_u8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_u8 vec_b8 = vec_u8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_b8 vec_b8 = vec_b8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_s16 vec_s16 = vec_s16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_s16 vec_b16 = vec_s16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_b16 vec_s16 = vec_s16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_u16 vec_u16 = vec_u16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_b16 vec_u16 = vec_u16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_u16 vec_b16 = vec_u16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_b16 vec_b16 = vec_b16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_s32 vec_s32 = vec_s32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_s32 vec_b32 = vec_s32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_b32 vec_s32 = vec_s32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_u32 vec_u32 = vec_u32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_b32 vec_u32 = vec_u32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_u32 vec_b32 = vec_u32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_b32 vec_b32 = vec_b32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_f32 vec_f32 = vec_f32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_f32 vec_b32 = vec_f32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_and vec_b32 vec_f32 = vec_f32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_s8 vec_s8 = vec_s8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_s8 vec_b8 = vec_s8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_b8 vec_s8 = vec_s8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_u8 vec_u8 = vec_u8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_b8 vec_u8 = vec_u8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_u8 vec_b8 = vec_u8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or 
vec_b8 vec_b8 = vec_b8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_s16 vec_s16 = vec_s16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_s16 vec_b16 = vec_s16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_b16 vec_s16 = vec_s16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_u16 vec_u16 = vec_u16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_b16 vec_u16 = vec_u16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_u16 vec_b16 = vec_u16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_b16 vec_b16 = vec_b16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_s32 vec_s32 = vec_s32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_s32 vec_b32 = vec_s32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_b32 vec_s32 = vec_s32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_u32 vec_u32 = vec_u32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_b32 vec_u32 = vec_u32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_u32 vec_b32 = vec_u32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_b32 vec_b32 = vec_b32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_f32 vec_f32 = vec_f32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_f32 vec_b32 = vec_f32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_or vec_b32 vec_f32 = vec_f32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_s8 vec_s8 = vec_s8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_s8 vec_b8 = vec_s8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_b8 vec_s8 = vec_s8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_u8 vec_u8 = vec_u8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_b8 vec_u8 = vec_u8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_u8 vec_b8 = vec_u8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_b8 vec_b8 = vec_b8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_s16 vec_s16 = vec_s16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_s16 vec_b16 = vec_s16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_b16 vec_s16 = vec_s16 
vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_u16 vec_u16 = vec_u16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_b16 vec_u16 = vec_u16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_u16 vec_b16 = vec_u16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_b16 vec_b16 = vec_b16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_s32 vec_s32 = vec_s32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_s32 vec_b32 = vec_s32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_b32 vec_s32 = vec_s32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_u32 vec_u32 = vec_u32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_b32 vec_u32 = vec_u32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_u32 vec_b32 = vec_u32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_b32 vec_b32 = vec_b32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_f32 vec_f32 = vec_f32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_f32 vec_b32 = vec_f32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_xor vec_b32 vec_f32 = vec_f32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_s8 vec_s8 = vec_s8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_s8 vec_b8 = vec_s8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_b8 vec_s8 = vec_s8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_u8 vec_u8 = vec_u8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_b8 vec_u8 = vec_u8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_u8 vec_b8 = vec_u8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_b8 vec_b8 = vec_b8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_s16 vec_s16 = vec_s16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_s16 vec_b16 = vec_s16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_b16 vec_s16 = vec_s16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_u16 vec_u16 = vec_u16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_b16 vec_u16 = vec_u16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC 
+vec_andc vec_u16 vec_b16 = vec_u16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_b16 vec_b16 = vec_b16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_s32 vec_s32 = vec_s32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_s32 vec_b32 = vec_s32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_b32 vec_s32 = vec_s32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_u32 vec_u32 = vec_u32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_b32 vec_u32 = vec_u32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_u32 vec_b32 = vec_u32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_b32 vec_b32 = vec_b32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_f32 vec_f32 = vec_f32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_f32 vec_b32 = vec_f32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_andc vec_b32 vec_f32 = vec_f32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_nor vec_u8 vec_u8 = vec_u8 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_nor vec_s8 vec_s8 = vec_s8 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_nor vec_b8 vec_b8 = vec_b8 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_nor vec_u16 vec_u16 = vec_u16 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_nor vec_s16 vec_s16 = vec_s16 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_nor vec_b16 vec_b16 = vec_b16 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_nor vec_u32 vec_u32 = vec_u32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_nor vec_s32 vec_s32 = vec_s32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_nor vec_b32 vec_b32 = vec_b32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_nor vec_f32 vec_f32 = vec_f32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_rl vec_u8 vec_u8 = vec_u8 vec_vrlb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_rl vec_u16 vec_u16 = vec_u16 vec_vrlh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_rl vec_u32 vec_u32 = vec_u32 vec_vrlw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_rl vec_s8 vec_u8 = vec_s8 vec_vrlb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_rl vec_s16 vec_u16 = vec_s16 vec_vrlh 
BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_rl vec_s32 vec_u32 = vec_s32 vec_vrlw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sl vec_u8 vec_u8 = vec_u8 vec_vslb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sl vec_u16 vec_u16 = vec_u16 vec_vslh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sl vec_u32 vec_u32 = vec_u32 vec_vslw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sl vec_s8 vec_u8 = vec_s8 vec_vslb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sl vec_s16 vec_u16 = vec_s16 vec_vslh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sl vec_s32 vec_u32 = vec_s32 vec_vslw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_u8 vec_u8 = vec_u8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_u16 vec_u8 = vec_u16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_u32 vec_u8 = vec_u32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_s8 vec_u8 = vec_s8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_s16 vec_u8 = vec_s16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_s32 vec_u8 = vec_s32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_b8 vec_u8 = vec_b8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_b16 vec_u8 = vec_b16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_b32 vec_u8 = vec_b32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_p16 vec_u8 = vec_p16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_u8 vec_u16 = vec_u8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_u16 vec_u16 = vec_u16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_u32 vec_u16 = vec_u32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_s8 vec_u16 = vec_s8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_s16 vec_u16 = vec_s16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_s32 vec_u16 = vec_s32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_b8 vec_u16 = vec_b8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_b16 vec_u16 = vec_b16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_b32 vec_u16 = vec_b32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_p16 vec_u16 = vec_p16 vec_vsl BUILTIN_AFTER_TRAVERSE 
CFG_VEC +vec_sll vec_u8 vec_u32 = vec_u8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_u16 vec_u32 = vec_u16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_u32 vec_u32 = vec_u32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_s8 vec_u32 = vec_s8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_s16 vec_u32 = vec_s16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_s32 vec_u32 = vec_s32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_b8 vec_u32 = vec_b8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_b16 vec_u32 = vec_b16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_b32 vec_u32 = vec_b32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sll vec_p16 vec_u32 = vec_p16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sr vec_u8 vec_u8 = vec_u8 vec_vsrb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sr vec_u16 vec_u16 = vec_u16 vec_vsrh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sr vec_u32 vec_u32 = vec_u32 vec_vsrw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sr vec_s8 vec_u8 = vec_s8 vec_vsrb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sr vec_s16 vec_u16 = vec_s16 vec_vsrh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sr vec_s32 vec_u32 = vec_s32 vec_vsrw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sra vec_u8 vec_u8 = vec_u8 vec_vsrab BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sra vec_u16 vec_u16 = vec_u16 vec_vsrah BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sra vec_u32 vec_u32 = vec_u32 vec_vsraw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sra vec_s8 vec_u8 = vec_s8 vec_vsrab BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sra vec_s16 vec_u16 = vec_s16 vec_vsrah BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sra vec_s32 vec_u32 = vec_s32 vec_vsraw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_u8 vec_u8 = vec_u8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_u16 vec_u8 = vec_u16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_u32 vec_u8 = vec_u32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_s8 vec_u8 = vec_s8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_s16 vec_u8 = vec_s16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl 
vec_s32 vec_u8 = vec_s32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_b8 vec_u8 = vec_b8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_b16 vec_u8 = vec_b16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_b32 vec_u8 = vec_b32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_p16 vec_u8 = vec_p16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_u8 vec_u16 = vec_u8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_u16 vec_u16 = vec_u16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_u32 vec_u16 = vec_u32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_s8 vec_u16 = vec_s8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_s16 vec_u16 = vec_s16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_s32 vec_u16 = vec_s32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_b8 vec_u16 = vec_b8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_b16 vec_u16 = vec_b16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_b32 vec_u16 = vec_b32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_p16 vec_u16 = vec_p16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_u8 vec_u32 = vec_u8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_u16 vec_u32 = vec_u16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_u32 vec_u32 = vec_u32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_s8 vec_u32 = vec_s8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_s16 vec_u32 = vec_s16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_s32 vec_u32 = vec_s32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_b8 vec_u32 = vec_b8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_b16 vec_u32 = vec_b16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_b32 vec_u32 = vec_b32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_srl vec_p16 vec_u32 = vec_p16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpgt vec_u8 vec_u8 = vec_b8 vec_vcmpgtub BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpgt vec_u16 vec_u16 = vec_b16 vec_vcmpgtuh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpgt vec_u32 
vec_u32 = vec_b32 vec_vcmpgtuw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpgt vec_s8 vec_s8 = vec_b8 vec_vcmpgtsb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpgt vec_s16 vec_s16 = vec_b16 vec_vcmpgtsh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpgt vec_s32 vec_s32 = vec_b32 vec_vcmpgtsw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpeq vec_u8 vec_u8 = vec_b8 vec_vcmpequb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpeq vec_u16 vec_u16 = vec_b16 vec_vcmpequh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpeq vec_u32 vec_u32 = vec_b32 vec_vcmpequw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpeq vec_s8 vec_s8 = vec_b8 vec_vcmpequb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpeq vec_s16 vec_s16 = vec_b16 vec_vcmpequh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpeq vec_s32 vec_s32 = vec_b32 vec_vcmpequw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_b8 vec_b8 vec_b8 = vec_b8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_b8 vec_b8 vec_u8 = vec_b8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_u8 vec_u8 vec_u8 = vec_u8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_u8 vec_u8 vec_b8 = vec_u8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_s8 vec_s8 vec_u8 = vec_s8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_s8 vec_s8 vec_b8 = vec_s8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_b16 vec_b16 vec_b16 = vec_b16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_b16 vec_b16 vec_u16 = vec_b16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_u16 vec_u16 vec_u16 = vec_u16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_u16 vec_u16 vec_b16 = vec_u16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_s16 vec_s16 vec_u16 = vec_s16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_s16 vec_s16 vec_b16 = vec_s16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_b32 vec_b32 vec_b32 = vec_b32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_b32 vec_b32 vec_u32 = vec_b32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_u32 vec_u32 vec_u32 = vec_u32 vec_vsel BUILTIN_AFTER_TRAVERSE 
CFG_VEC +vec_sel vec_u32 vec_u32 vec_b32 = vec_u32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_s32 vec_s32 vec_u32 = vec_s32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_s32 vec_s32 vec_b32 = vec_s32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_f32 vec_f32 vec_b32 = vec_f32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sel vec_f32 vec_f32 vec_u32 = vec_f32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_pack vec_u16 vec_u16 = vec_u8 vec_vpkuhum BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_pack vec_u32 vec_u32 = vec_u16 vec_vpkuwum BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_pack vec_s16 vec_s16 = vec_s8 vec_vpkuhum BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_pack vec_s32 vec_s32 = vec_s16 vec_vpkuwum BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_pack vec_b16 vec_b16 = vec_b8 vec_vpkuhum BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_pack vec_b32 vec_b32 = vec_b16 vec_vpkuwum BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_packs vec_u16 vec_u16 = vec_u8 vec_vpkuhus BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_packs vec_u32 vec_u32 = vec_u16 vec_vpkuwus BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_packsu vec_u16 vec_u16 = vec_u8 vec_vpkuhus BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_packsu vec_u32 vec_u32 = vec_u16 vec_vpkuwus BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_packs vec_s16 vec_s16 = vec_s8 vec_vpkshss BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_packs vec_s32 vec_s32 = vec_s16 vec_vpkswss BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_packsu vec_s16 vec_s16 = vec_u8 vec_vpkshus BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_packsu vec_s32 vec_s32 = vec_u16 vec_vpkswus BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_packpx vec_u32 vec_u32 = vec_p16 vec_vpkpx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_unpackh vec_s8 = vec_s16 vec_vupkhsb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_unpackh vec_s16 = vec_s32 vec_vupkhsh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_unpackh vec_b8 = vec_b16 vec_vupkhsb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_unpackh vec_b16 = vec_b32 vec_vupkhsh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_unpackh vec_p16 = vec_u32 vec_vupkhpx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_unpackl 
vec_s8 = vec_s16 vec_vupklsb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_unpackl vec_s16 = vec_s32 vec_vupklsh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_unpackl vec_b8 = vec_b16 vec_vupklsb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_unpackl vec_b16 = vec_b32 vec_vupklsh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_unpackl vec_p16 = vec_u32 vec_vupklpx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergeh vec_u8 vec_u8 = vec_u8 vec_vmrghb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergeh vec_u16 vec_u16 = vec_u16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergeh vec_u32 vec_u32 = vec_u32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergeh vec_s8 vec_s8 = vec_s8 vec_vmrghb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergeh vec_s16 vec_s16 = vec_s16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergeh vec_s32 vec_s32 = vec_s32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergeh vec_f32 vec_f32 = vec_f32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergeh vec_p16 vec_p16 = vec_p16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergeh vec_b8 vec_b8 = vec_b8 vec_vmrghb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergeh vec_b16 vec_b16 = vec_b16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergeh vec_b32 vec_b32 = vec_b32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_unpack2uh vec_u8 vec_u8 = vec_u16 vec_unpack2uh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghb +vec_unpack2uh vec_u16 vec_u16 = vec_u32 vec_unpack2uh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghh +vec_unpack2sh vec_u8 vec_u8 = vec_s16 vec_unpack2sh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghb +vec_unpack2sh vec_u16 vec_u16 = vec_s32 vec_unpack2sh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghh +vec_mergel vec_u8 vec_u8 = vec_u8 vec_vmrglb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergel vec_u16 vec_u16 = vec_u16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergel vec_u32 vec_u32 = vec_u32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergel vec_s8 vec_s8 = vec_s8 vec_vmrglb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergel vec_s16 vec_s16 = vec_s16 vec_vmrglh 
BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergel vec_s32 vec_s32 = vec_s32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergel vec_f32 vec_f32 = vec_f32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergel vec_p16 vec_p16 = vec_p16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergel vec_b8 vec_b8 = vec_b8 vec_vmrglb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergel vec_b16 vec_b16 = vec_b16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mergel vec_b32 vec_b32 = vec_b32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_unpack2ul vec_u8 vec_u8 = vec_u16 vec_unpack2ul BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglb +vec_unpack2ul vec_u16 vec_u16 = vec_u32 vec_unpack2ul BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglh +vec_unpack2sl vec_u8 vec_u8 = vec_s16 vec_unpack2sl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglb +vec_unpack2sl vec_u16 vec_u16 = vec_s32 vec_unpack2sl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglh +vec_splat vec_u8 immed_u5 = vec_u8 vec_vspltb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat vec_u16 immed_u5 = vec_u16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat vec_u32 immed_u5 = vec_u32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat vec_s8 immed_u5 = vec_s8 vec_vspltb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat vec_s16 immed_u5 = vec_s16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat vec_s32 immed_u5 = vec_s32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat vec_b8 immed_u5 = vec_b8 vec_vspltb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat vec_b16 immed_u5 = vec_b16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat vec_b32 immed_u5 = vec_b32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat vec_p16 immed_u5 = vec_p16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat vec_f32 immed_u5 = vec_f32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat_s8 immed_s5 = vec_s8 vec_vspltisb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat_s16 immed_s5 = vec_s16 vec_vspltish BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat_s32 immed_s5 = vec_s32 vec_vspltisw 
BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_splat_u8 immed_s5 = vec_u8 vec_splat_u8 BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vspltisb +vec_splat_u16 immed_s5 = vec_u16 vec_splat_u16 BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vspltish +vec_splat_u32 immed_s5 = vec_u32 vec_splat_u32 BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vspltisw +vec_perm vec_u8 vec_u8 vec_u8 = vec_u8 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_perm vec_u16 vec_u16 vec_u8 = vec_u16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_perm vec_u32 vec_u32 vec_u8 = vec_u32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_perm vec_s8 vec_s8 vec_u8 = vec_s8 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_perm vec_s16 vec_s16 vec_u8 = vec_s16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_perm vec_s32 vec_s32 vec_u8 = vec_s32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_perm vec_b8 vec_b8 vec_u8 = vec_b8 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_perm vec_b16 vec_b16 vec_u8 = vec_b16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_perm vec_b32 vec_b32 vec_u8 = vec_b32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_perm vec_p16 vec_p16 vec_u8 = vec_p16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_perm vec_f32 vec_f32 vec_u8 = vec_f32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sld vec_u8 vec_u8 immed_u4 = vec_u8 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sld vec_u16 vec_u16 immed_u4 = vec_u16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sld vec_u32 vec_u32 immed_u4 = vec_u32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sld vec_s8 vec_s8 immed_u4 = vec_s8 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sld vec_s16 vec_s16 immed_u4 = vec_s16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sld vec_s32 vec_s32 immed_u4 = vec_s32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sld vec_p16 vec_p16 immed_u4 = vec_p16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sld vec_f32 vec_f32 immed_u4 = vec_f32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sld vec_b8 vec_b8 immed_u4 = vec_b8 vec_vsldoi BUILTIN_AFTER_TRAVERSE 
CFG_VEC +vec_sld vec_b16 vec_b16 immed_u4 = vec_b16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sld vec_b32 vec_b32 immed_u4 = vec_b32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_u8 vec_u8 = vec_u8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_u16 vec_u8 = vec_u16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_u32 vec_u8 = vec_u32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_s8 vec_u8 = vec_s8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_s16 vec_u8 = vec_s16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_s32 vec_u8 = vec_s32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_p16 vec_u8 = vec_p16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_u8 vec_s8 = vec_u8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_u16 vec_s8 = vec_u16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_u32 vec_s8 = vec_u32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_s8 vec_s8 = vec_s8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_s16 vec_s8 = vec_s16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_s32 vec_s8 = vec_s32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_p16 vec_s8 = vec_p16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_f32 vec_u8 = vec_f32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_slo vec_f32 vec_s8 = vec_f32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_u8 vec_u8 = vec_u8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_u16 vec_u8 = vec_u16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_u32 vec_u8 = vec_u32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_s8 vec_u8 = vec_s8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_s16 vec_u8 = vec_s16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_s32 vec_u8 = vec_s32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_p16 vec_u8 = vec_p16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_u8 vec_s8 = vec_u8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_u16 vec_s8 = vec_u16 vec_vsro 
BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_u32 vec_s8 = vec_u32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_s8 vec_s8 = vec_s8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_s16 vec_s8 = vec_s16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_s32 vec_s8 = vec_s32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_p16 vec_s8 = vec_p16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_f32 vec_u8 = vec_f32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sro vec_f32 vec_s8 = vec_f32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_u8 vec_u8 = vec_u8 vec_vmaxub BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_u8 vec_b8 = vec_u8 vec_vmaxub BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_b8 vec_u8 = vec_u8 vec_vmaxub BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_s8 vec_s8 = vec_s8 vec_vmaxsb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_s8 vec_b8 = vec_s8 vec_vmaxsb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_b8 vec_s8 = vec_s8 vec_vmaxsb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_u16 vec_u16 = vec_u16 vec_vmaxuh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_u16 vec_b16 = vec_u16 vec_vmaxuh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_b16 vec_u16 = vec_u16 vec_vmaxuh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_s16 vec_s16 = vec_s16 vec_vmaxsh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_s16 vec_b16 = vec_s16 vec_vmaxsh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_b16 vec_s16 = vec_s16 vec_vmaxsh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_u32 vec_u32 = vec_u32 vec_vmaxuw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_u32 vec_b32 = vec_u32 vec_vmaxuw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_b32 vec_u32 = vec_u32 vec_vmaxuw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_s32 vec_s32 = vec_s32 vec_vmaxsw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_s32 vec_b32 = vec_s32 vec_vmaxsw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_b32 vec_s32 = vec_s32 vec_vmaxsw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_u8 vec_u8 = vec_u8 vec_vminub BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min 
vec_u8 vec_b8 = vec_u8 vec_vminub BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_b8 vec_u8 = vec_u8 vec_vminub BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_s8 vec_s8 = vec_s8 vec_vminsb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_s8 vec_b8 = vec_s8 vec_vminsb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_b8 vec_s8 = vec_s8 vec_vminsb BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_u16 vec_u16 = vec_u16 vec_vminuh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_u16 vec_b16 = vec_u16 vec_vminuh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_b16 vec_u16 = vec_u16 vec_vminuh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_s16 vec_s16 = vec_s16 vec_vminsh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_s16 vec_b16 = vec_s16 vec_vminsh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_b16 vec_s16 = vec_s16 vec_vminsh BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_u32 vec_u32 = vec_u32 vec_vminuw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_u32 vec_b32 = vec_u32 vec_vminuw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_b32 vec_u32 = vec_u32 vec_vminuw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_s32 vec_s32 = vec_s32 vec_vminsw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_s32 vec_b32 = vec_s32 vec_vminsw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_b32 vec_s32 = vec_s32 vec_vminsw BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_lde int const_unsigned_char_ptr = vec_u8_load_op vec_lvebx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvebx TRUE FALSE +vec_lde int const_unsigned_short_ptr = vec_u16_load_op vec_lvehx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvehx TRUE FALSE +vec_lde int const_unsigned_int_ptr = vec_u32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE +vec_lde int const_unsigned_long_ptr = vec_u32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE +vec_lde int const_signed_char_ptr = vec_s8_load_op vec_lvebx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvebx TRUE FALSE +vec_lde int const_short_ptr = vec_s16_load_op vec_lvehx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvehx 
TRUE FALSE +vec_lde int const_int_ptr = vec_s32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE +vec_lde int const_long_ptr = vec_s32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE +vec_lde int const_float_ptr = vec_f32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE +vec_ld int const_unsigned_char_ptr = vec_u8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_unsigned_short_ptr = vec_u16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_unsigned_int_ptr = vec_u32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_unsigned_long_ptr = vec_u32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_signed_char_ptr = vec_s8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_short_ptr = vec_s16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_int_ptr = vec_s32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_long_ptr = vec_s32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_float_ptr = vec_f32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ldl int const_unsigned_char_ptr = vec_u8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_unsigned_short_ptr = vec_u16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_unsigned_int_ptr = vec_u32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_unsigned_long_ptr = vec_u32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE 
CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_signed_char_ptr = vec_s8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_short_ptr = vec_s16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_int_ptr = vec_s32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_long_ptr = vec_s32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_float_ptr = vec_f32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ld int const_vec_u8_ptr = vec_u8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_vec_u16_ptr = vec_u16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_vec_u32_ptr = vec_u32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_vec_s8_ptr = vec_s8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_vec_s16_ptr = vec_s16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_vec_s32_ptr = vec_s32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_vec_p16_ptr = vec_p16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_vec_b8_ptr = vec_b8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_vec_b16_ptr = vec_b16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_vec_b32_ptr = vec_b32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ld int const_vec_f32_ptr = vec_f32_load_op vec_lvx 
BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx +vec_ldl int const_vec_u8_ptr = vec_u8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_vec_u16_ptr = vec_u16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_vec_u32_ptr = vec_u32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_vec_s8_ptr = vec_s8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_vec_s16_ptr = vec_s16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_vec_s32_ptr = vec_s32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_vec_p16_ptr = vec_p16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_vec_b8_ptr = vec_b8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_vec_b16_ptr = vec_b16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_vec_b32_ptr = vec_b32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ldl int const_vec_f32_ptr = vec_f32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx +vec_ste vec_u8 int unsigned_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_u16 int unsigned_short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_u32 int unsigned_int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_u32 int unsigned_long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_s8 int signed_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_s16 int short_ptr = 
void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_s32 int int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_s32 int long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_f32 int float_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_p16 int short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_p16 int unsigned_short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_b8 int unsigned_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_b8 int signed_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_b16 int unsigned_short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_b16 int short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_b32 int unsigned_int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_b32 int unsigned_long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_b32 int int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ste vec_b32 int long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_st vec_u8 int unsigned_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_u16 int unsigned_short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_u32 int unsigned_int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_u32 int unsigned_long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_s8 int signed_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_s16 int short_ptr = void_store_op vec_stvx
BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_s32 int int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_s32 int long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_f32 int float_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_p16 int short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_p16 int unsigned_short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_b8 int unsigned_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_b8 int signed_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_b16 int unsigned_short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_b16 int short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_b32 int unsigned_int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_b32 int unsigned_long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_b32 int int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_b32 int long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_stl vec_u8 int unsigned_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_u16 int unsigned_short_ptr = void_store_op vec_stvxl 
BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_u32 int unsigned_int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_u32 int unsigned_long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_s8 int signed_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_s16 int short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_s32 int int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_s32 int long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_f32 int float_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_p16 int short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_p16 int unsigned_short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_b8 int unsigned_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_b8 int signed_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_b16 int unsigned_short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_b16 int short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_b32 int unsigned_int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_b32 int 
unsigned_long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_b32 int int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_b32 int long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_st vec_u8 int vec_u8_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_u16 int vec_u16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_u32 int vec_u32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_s8 int vec_s8_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_s16 int vec_s16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_s32 int vec_s32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_b8 int vec_b8_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_b16 int vec_b16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_b32 int vec_b32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_p16 int vec_p16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_st vec_f32 int vec_f32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx +vec_stl vec_u8 int vec_u8_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_u16 int vec_u16_ptr = void_store_op vec_stvxl 
BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_u32 int vec_u32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_s8 int vec_s8_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_s16 int vec_s16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_s32 int vec_s32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_b8 int vec_b8_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_b16 int vec_b16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_b32 int vec_b32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_p16 int vec_p16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_stl vec_f32 int vec_f32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx +vec_lvsl int const_volatile_unsigned_char_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE +vec_lvsl int const_volatile_unsigned_short_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE +vec_lvsl int const_volatile_unsigned_int_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE +vec_lvsl int const_volatile_unsigned_long_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE +vec_lvsl int const_volatile_signed_char_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE +vec_lvsl int const_volatile_short_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE +vec_lvsl int 
const_volatile_int_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE +vec_lvsl int const_volatile_long_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE +vec_lvsl int const_volatile_float_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE +vec_lvsr int const_volatile_unsigned_char_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE +vec_lvsr int const_volatile_unsigned_short_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE +vec_lvsr int const_volatile_unsigned_int_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE +vec_lvsr int const_volatile_unsigned_long_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE +vec_lvsr int const_volatile_signed_char_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE +vec_lvsr int const_volatile_short_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE +vec_lvsr int const_volatile_int_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE +vec_lvsr int const_volatile_long_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE +vec_lvsr int const_volatile_float_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE +vec_mfvscr = volatile_vec_u16 vec_mfvscr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mtvscr vec_u8 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mtvscr vec_u16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mtvscr vec_u32 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mtvscr vec_s8 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mtvscr vec_s16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mtvscr vec_s32 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mtvscr vec_b8 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC 
+vec_mtvscr vec_b16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mtvscr vec_b32 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_mtvscr vec_p16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_dst const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_short_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_int_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_long_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_float_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dstt const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_signed_char_ptr int immed_u2 = 
volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_short_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_int_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_long_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_float_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstst const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_short_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_int_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_long_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_float_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dststt const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt 
const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_short_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_int_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_long_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_float_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dst const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst 
const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dst const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE +vec_dstt const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstt const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE +vec_dstst const_vec_u8_ptr int immed_u2 = 
volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dstst const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE +vec_dststt const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt 
const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dststt const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE +vec_dss immed_u2 = volatile_void_load_op vec_dss BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_dssall = volatile_void_load_op vec_dssall BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_add vec_f32 vec_f32 = vec_f32 vec_vaddfp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_sub vec_f32 vec_f32 = vec_f32 vec_vsubfp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_madd vec_f32 vec_f32 vec_f32 = vec_f32 vec_vmaddfp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_nmsub vec_f32 vec_f32 vec_f32 = vec_f32 vec_vnmsubfp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpgt vec_f32 vec_f32 = vec_b32 vec_vcmpgtfp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpge vec_f32 vec_f32 = vec_b32 vec_vcmpgefp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpeq vec_f32 vec_f32 = vec_b32 vec_vcmpeqfp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cmpb vec_f32 vec_f32 = vec_s32 vec_vcmpbfp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_max vec_f32 vec_f32 = vec_f32 vec_vmaxfp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_min vec_f32 vec_f32 = vec_f32 vec_vminfp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_re vec_f32 = vec_f32 vec_vrefp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_rsqrte vec_f32 = 
vec_f32 vec_vrsqrtefp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_loge vec_f32 = vec_f32 vec_vlogefp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_expte vec_f32 = vec_f32 vec_vexptefp BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_trunc vec_f32 = vec_f32 vec_vrfiz BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_round vec_f32 = vec_f32 vec_vrfin BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ceil vec_f32 = vec_f32 vec_vrfip BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_floor vec_f32 = vec_f32 vec_vrfim BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ctf vec_u32 immed_u5 = vec_f32 vec_vcfux BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ctf vec_s32 immed_u5 = vec_f32 vec_vcfsx BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_ctu vec_f32 immed_u5 = vec_u32 vec_vctuxs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_cts vec_f32 immed_u5 = vec_s32 vec_vctsxs BUILTIN_AFTER_TRAVERSE CFG_VEC +vec_all_gt vec_u8 vec_u8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_all_gt vec_u8 vec_b8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_all_gt vec_b8 vec_u8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_all_le vec_u8 vec_u8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_all_le vec_u8 vec_b8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_all_le vec_b8 vec_u8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_any_gt vec_u8 vec_u8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_any_gt vec_u8 vec_b8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_any_gt vec_b8 vec_u8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_any_le vec_u8 vec_u8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_any_le vec_u8 vec_b8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_any_le vec_b8 vec_u8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_all_gt vec_s8 vec_s8 = cc24t vec_all_gt 
BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_all_gt vec_s8 vec_b8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_all_gt vec_b8 vec_s8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_all_le vec_s8 vec_s8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_all_le vec_s8 vec_b8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_all_le vec_b8 vec_s8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_any_gt vec_s8 vec_s8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_any_gt vec_s8 vec_b8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_any_gt vec_b8 vec_s8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_any_le vec_s8 vec_s8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_any_le vec_s8 vec_b8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_any_le vec_b8 vec_s8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_all_lt vec_u8 vec_u8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_all_lt vec_u8 vec_b8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_all_lt vec_b8 vec_u8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_all_ge vec_u8 vec_u8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_all_ge vec_u8 vec_b8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_all_ge vec_b8 vec_u8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_any_lt vec_u8 vec_u8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_any_lt vec_u8 vec_b8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_any_lt vec_b8 vec_u8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_any_ge vec_u8 
vec_u8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_any_ge vec_u8 vec_b8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_any_ge vec_b8 vec_u8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD +vec_all_lt vec_s8 vec_s8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_all_lt vec_s8 vec_b8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_all_lt vec_b8 vec_s8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_all_ge vec_s8 vec_s8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_all_ge vec_s8 vec_b8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_all_ge vec_b8 vec_s8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_any_lt vec_s8 vec_s8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_any_lt vec_s8 vec_b8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_any_lt vec_b8 vec_s8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_any_ge vec_s8 vec_s8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_any_ge vec_s8 vec_b8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_any_ge vec_b8 vec_s8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD +vec_all_gt vec_u16 vec_u16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_all_gt vec_u16 vec_b16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_all_gt vec_b16 vec_u16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_all_le vec_u16 vec_u16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_all_le vec_u16 vec_b16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_all_le vec_b16 vec_u16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE 
CFG_VEC FALSE MOP_vcmpgtuhD +vec_any_gt vec_u16 vec_u16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_any_gt vec_u16 vec_b16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_any_gt vec_b16 vec_u16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_any_le vec_u16 vec_u16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_any_le vec_u16 vec_b16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_any_le vec_b16 vec_u16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_all_gt vec_s16 vec_s16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_all_gt vec_s16 vec_b16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_all_gt vec_b16 vec_s16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_all_le vec_s16 vec_s16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_all_le vec_s16 vec_b16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_all_le vec_b16 vec_s16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_any_gt vec_s16 vec_s16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_any_gt vec_s16 vec_b16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_any_gt vec_b16 vec_s16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_any_le vec_s16 vec_s16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_any_le vec_s16 vec_b16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_any_le vec_b16 vec_s16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_all_lt vec_u16 vec_u16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_all_lt vec_u16 vec_b16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD 
+vec_all_lt vec_b16 vec_u16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_all_ge vec_u16 vec_u16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_all_ge vec_u16 vec_b16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_all_ge vec_b16 vec_u16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_any_lt vec_u16 vec_u16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_any_lt vec_u16 vec_b16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_any_lt vec_b16 vec_u16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_any_ge vec_u16 vec_u16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_any_ge vec_u16 vec_b16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_any_ge vec_b16 vec_u16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD +vec_all_lt vec_s16 vec_s16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_all_lt vec_s16 vec_b16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_all_lt vec_b16 vec_s16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_all_ge vec_s16 vec_s16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_all_ge vec_s16 vec_b16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_all_ge vec_b16 vec_s16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_any_lt vec_s16 vec_s16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_any_lt vec_s16 vec_b16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_any_lt vec_b16 vec_s16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_any_ge vec_s16 vec_s16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_any_ge vec_s16 
vec_b16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_any_ge vec_b16 vec_s16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD +vec_all_gt vec_u32 vec_u32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_all_gt vec_u32 vec_b32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_all_gt vec_b32 vec_u32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_all_le vec_u32 vec_u32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_all_le vec_u32 vec_b32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_all_le vec_b32 vec_u32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_any_gt vec_u32 vec_u32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_any_gt vec_u32 vec_b32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_any_gt vec_b32 vec_u32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_any_le vec_u32 vec_u32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_any_le vec_u32 vec_b32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_any_le vec_b32 vec_u32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_all_gt vec_s32 vec_s32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_all_gt vec_s32 vec_b32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_all_gt vec_b32 vec_s32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_all_le vec_s32 vec_s32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_all_le vec_s32 vec_b32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_all_le vec_b32 vec_s32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_any_gt vec_s32 vec_s32 = cc26f vec_any_gt 
BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_any_gt vec_s32 vec_b32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_any_gt vec_b32 vec_s32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_any_le vec_s32 vec_s32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_any_le vec_s32 vec_b32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_any_le vec_b32 vec_s32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_all_lt vec_u32 vec_u32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_all_lt vec_u32 vec_b32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_all_lt vec_b32 vec_u32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_all_ge vec_u32 vec_u32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_all_ge vec_u32 vec_b32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_all_ge vec_b32 vec_u32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_any_lt vec_u32 vec_u32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_any_lt vec_u32 vec_b32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_any_lt vec_b32 vec_u32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_any_ge vec_u32 vec_u32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_any_ge vec_u32 vec_b32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_any_ge vec_b32 vec_u32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD +vec_all_lt vec_s32 vec_s32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_all_lt vec_s32 vec_b32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_all_lt vec_b32 vec_s32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE 
CFG_VEC FALSE MOP_vcmpgtswD +vec_all_ge vec_s32 vec_s32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_all_ge vec_s32 vec_b32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_all_ge vec_b32 vec_s32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_any_lt vec_s32 vec_s32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_any_lt vec_s32 vec_b32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_any_lt vec_b32 vec_s32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_any_ge vec_s32 vec_s32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_any_ge vec_s32 vec_b32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_any_ge vec_b32 vec_s32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD +vec_all_eq vec_u8 vec_u8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_eq vec_u8 vec_b8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_eq vec_b8 vec_u8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_eq vec_b8 vec_b8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_ne vec_u8 vec_u8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_ne vec_u8 vec_b8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_ne vec_b8 vec_u8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_ne vec_b8 vec_b8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_eq vec_u8 vec_u8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_eq vec_u8 vec_b8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_eq vec_b8 vec_u8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_eq vec_b8 
vec_b8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_ne vec_u8 vec_u8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_ne vec_u8 vec_b8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_ne vec_b8 vec_u8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_ne vec_b8 vec_b8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_eq vec_s8 vec_s8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_eq vec_s8 vec_b8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_eq vec_b8 vec_s8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_ne vec_s8 vec_s8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_ne vec_s8 vec_b8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_ne vec_b8 vec_s8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_eq vec_s8 vec_s8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_eq vec_s8 vec_b8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_eq vec_b8 vec_s8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_ne vec_s8 vec_s8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_ne vec_s8 vec_b8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_any_ne vec_b8 vec_s8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD +vec_all_eq vec_u16 vec_u16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_eq vec_u16 vec_b16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_eq vec_b16 vec_u16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_eq vec_b16 vec_b16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 
MOP_vcmpequhD +vec_all_eq vec_p16 vec_p16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_ne vec_u16 vec_u16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_ne vec_u16 vec_b16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_ne vec_b16 vec_u16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_ne vec_b16 vec_b16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_ne vec_p16 vec_p16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_eq vec_u16 vec_u16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_eq vec_u16 vec_b16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_eq vec_b16 vec_u16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_eq vec_b16 vec_b16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_eq vec_p16 vec_p16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_ne vec_u16 vec_u16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_ne vec_u16 vec_b16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_ne vec_b16 vec_u16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_ne vec_b16 vec_b16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_ne vec_p16 vec_p16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_eq vec_s16 vec_s16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_eq vec_s16 vec_b16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_eq vec_b16 vec_s16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_ne vec_s16 vec_s16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_ne vec_s16 
vec_b16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_ne vec_b16 vec_s16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_eq vec_s16 vec_s16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_eq vec_s16 vec_b16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_eq vec_b16 vec_s16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_ne vec_s16 vec_s16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_ne vec_s16 vec_b16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_any_ne vec_b16 vec_s16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD +vec_all_eq vec_u32 vec_u32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_eq vec_u32 vec_b32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_eq vec_b32 vec_u32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_eq vec_b32 vec_b32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_ne vec_u32 vec_u32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_ne vec_u32 vec_b32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_ne vec_b32 vec_u32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_ne vec_b32 vec_b32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_eq vec_u32 vec_u32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_eq vec_u32 vec_b32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_eq vec_b32 vec_u32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_eq vec_b32 vec_b32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_ne vec_u32 vec_u32 = cc24f vec_any_ne 
BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_ne vec_u32 vec_b32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_ne vec_b32 vec_u32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_ne vec_b32 vec_b32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_eq vec_s32 vec_s32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_eq vec_s32 vec_b32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_eq vec_b32 vec_s32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_ne vec_s32 vec_s32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_ne vec_s32 vec_b32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_ne vec_b32 vec_s32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_eq vec_s32 vec_s32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_eq vec_s32 vec_b32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_eq vec_b32 vec_s32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_ne vec_s32 vec_s32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_ne vec_s32 vec_b32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_any_ne vec_b32 vec_s32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD +vec_all_gt vec_f32 vec_f32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD +vec_all_ngt vec_f32 vec_f32 = cc26t vec_all_ngt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD +vec_any_ngt vec_f32 vec_f32 = cc24f vec_any_ngt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD +vec_any_gt vec_f32 vec_f32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD +vec_all_lt vec_f32 vec_f32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC 
FALSE MOP_vcmpgtfpD +vec_all_nlt vec_f32 vec_f32 = cc26tr vec_all_nlt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD +vec_any_nlt vec_f32 vec_f32 = cc24fr vec_any_nlt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD +vec_any_lt vec_f32 vec_f32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD +vec_all_ge vec_f32 vec_f32 = cc24t vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD +vec_all_nge vec_f32 vec_f32 = cc26t vec_all_nge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD +vec_any_nge vec_f32 vec_f32 = cc24f vec_any_nge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD +vec_any_ge vec_f32 vec_f32 = cc26f vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD +vec_all_le vec_f32 vec_f32 = cc24tr vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD +vec_all_nle vec_f32 vec_f32 = cc26tr vec_all_nle BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD +vec_any_nle vec_f32 vec_f32 = cc24fr vec_any_nle BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD +vec_any_le vec_f32 vec_f32 = cc26fr vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD +vec_all_eq vec_f32 vec_f32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD +vec_all_ne vec_f32 vec_f32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD +vec_any_ne vec_f32 vec_f32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD +vec_any_eq vec_f32 vec_f32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD +vec_all_numeric vec_f32 = cc24td vec_all_numeric BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD +vec_all_nan vec_f32 = cc26td vec_all_nan BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD +vec_any_nan vec_f32 = cc24fd vec_any_nan BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD +vec_any_numeric vec_f32 = cc26fd vec_any_numeric BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD +vec_all_in vec_f32 vec_f32 = cc26t vec_all_in BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpbfpD 
+vec_any_out vec_f32 vec_f32 = cc26f vec_any_out BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpbfpD |