author     (no author) <(no author)@138bc75d-0d04-0410-961f-82ee72b054a4>  2005-04-22 02:26:22 +0000
committer  (no author) <(no author)@138bc75d-0d04-0410-961f-82ee72b054a4>  2005-04-22 02:26:22 +0000
commit     94528b8483213ee766fee7bc1917caf5a301f448 (patch)
tree       b53a754e6b06b616e174ba10db778fee72544ffc /gcc/config/rs6000
parent     0fcc981fa6eb41869dce802e289b012f4c3726be (diff)

This commit was manufactured by cvs2svn to create tag 'apple-gcc-5010'.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/tags/apple-gcc-5010@98539 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/config/rs6000')
-rw-r--r--  gcc/config/rs6000/aix.h              |    3
-rw-r--r--  gcc/config/rs6000/altivec.h          |   15
-rw-r--r--  gcc/config/rs6000/altivec.md         |   74
-rw-r--r--  gcc/config/rs6000/builtin.ops        |  297
-rw-r--r--  gcc/config/rs6000/darwin-fallback.c  |  115
-rw-r--r--  gcc/config/rs6000/darwin-ldouble.c   |    2
-rw-r--r--  gcc/config/rs6000/darwin-tramp.asm   |    2
-rw-r--r--  gcc/config/rs6000/darwin.h           |  180
-rw-r--r--  gcc/config/rs6000/darwin.md          |   58
-rw-r--r--  gcc/config/rs6000/host-darwin.c      |   71
-rwxr-xr-x  gcc/config/rs6000/ops-to-gp          |  620
-rw-r--r--  gcc/config/rs6000/rs6000-c.c         |  162
-rw-r--r--  gcc/config/rs6000/rs6000-protos.h    |   13
-rw-r--r--  gcc/config/rs6000/rs6000.c           | 3005
-rw-r--r--  gcc/config/rs6000/rs6000.h           |  486
-rw-r--r--  gcc/config/rs6000/rs6000.md          |  290
-rw-r--r--  gcc/config/rs6000/spe.md             |   15
-rw-r--r--  gcc/config/rs6000/sysv4.h            |    5
-rw-r--r--  gcc/config/rs6000/t-aix43            |   19
-rw-r--r--  gcc/config/rs6000/t-aix52            |   19
-rw-r--r--  gcc/config/rs6000/t-darwin           |   11
-rw-r--r--  gcc/config/rs6000/t-darwin8          |    4
-rw-r--r--  gcc/config/rs6000/t-rs6000           |    3
-rw-r--r--  gcc/config/rs6000/t-rtems            |   94
-rw-r--r--  gcc/config/rs6000/vec.h              | 4515
-rw-r--r--  gcc/config/rs6000/vec.ops            | 1025
-rw-r--r--  gcc/config/rs6000/x-darwin           |   10

27 files changed, 10459 insertions, 654 deletions
diff --git a/gcc/config/rs6000/aix.h b/gcc/config/rs6000/aix.h
index 440fe02c3a2..82d7ec7f375 100644
--- a/gcc/config/rs6000/aix.h
+++ b/gcc/config/rs6000/aix.h
@@ -264,3 +264,6 @@
32-bit mode. */
#define OS_MISSING_POWERPC64 1
#define OS_MISSING_ALTIVEC 1
+
+/* WINT_TYPE */
+#define WINT_TYPE "int"
diff --git a/gcc/config/rs6000/altivec.h b/gcc/config/rs6000/altivec.h
index 0447d9e1fc4..13902ab3b59 100644
--- a/gcc/config/rs6000/altivec.h
+++ b/gcc/config/rs6000/altivec.h
@@ -36,17 +36,18 @@
#error Use the "-maltivec" flag to enable PowerPC AltiVec support
#endif
-/* If __APPLE_ALTIVEC__ is defined, the compiler supports 'vector',
- 'pixel' and 'bool' as context-sensitive AltiVec keywords (in
- non-AltiVec contexts, they revert to their original meanings,
- if any), so we do not need to define them as macros. */
+/* APPLE LOCAL begin AltiVec */
+/* If __APPLE_ALTIVEC__ is defined, the compiler has internally
+ synthesized the definitions contained in this header. */
-#if !defined(__APPLE_ALTIVEC__)
+#if defined(__APPLE_ALTIVEC__)
+#warning Ignoring <altivec.h> because "-faltivec" specified
+#else
/* You are allowed to undef these for C++ compatibility. */
#define vector __vector
#define pixel __pixel
#define bool __bool
-#endif
+/* APPLE LOCAL end AltiVec */
/* Condition register codes for AltiVec predicates. */
@@ -11569,5 +11570,7 @@ __ch (__bin_args_eq (__vector float, (a1), __vector float, (a2)), \
#endif /* __cplusplus */
+/* APPLE LOCAL AltiVec */
+#endif /* __APPLE_ALTIVEC__ */
#endif /* _ALTIVEC_H */
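A minimal sketch of the two modes this header now distinguishes (illustrative only, not part of the patch): under -maltivec the short keyword spellings come from the macros above, while under -faltivec (which predefines __APPLE_ALTIVEC__) they are synthesized by the compiler and the header body is skipped with the #warning.

#include <altivec.h>     /* no-op, plus the #warning, under -faltivec */

__vector signed int a;   /* built-in spelling, accepted in either mode */
vector signed int b;     /* macro from <altivec.h> under -maltivec;
                            context-sensitive keyword under -faltivec */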
diff --git a/gcc/config/rs6000/altivec.md b/gcc/config/rs6000/altivec.md
index 9e98ffe1a6b..2dc4ed9c7fa 100644
--- a/gcc/config/rs6000/altivec.md
+++ b/gcc/config/rs6000/altivec.md
@@ -474,6 +474,66 @@
DONE;
}")
+;; APPLE LOCAL begin 3972875 mainline 2005-04-18
+;; 32 bit integer multiplication
+;; A_high = Operand_0 & 0xFFFF0000 >> 16
+;; A_low = Operand_0 & 0xFFFF
+;; B_high = Operand_1 & 0xFFFF0000 >> 16
+;; B_low = Operand_1 & 0xFFFF
+;; result = A_low * B_low + (A_high * B_low + B_high * A_low) << 16
+
+;; (define_insn "mulv4si3"
+;; [(set (match_operand:V4SI 0 "register_operand" "=v")
+;; (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
+;; (match_operand:V4SI 2 "register_operand" "v")))]
+(define_expand "mulv4si3"
+ [(use (match_operand:V4SI 0 "register_operand" ""))
+ (use (match_operand:V4SI 1 "register_operand" ""))
+ (use (match_operand:V4SI 2 "register_operand" ""))]
+ "TARGET_ALTIVEC"
+ "
+ {
+ rtx zero;
+ rtx swap;
+ rtx small_swap;
+ rtx sixteen;
+ rtx one;
+ rtx two;
+ rtx low_product;
+ rtx high_product;
+
+ zero = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
+
+ sixteen = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (sixteen, gen_rtx_CONST_INT (V4SImode, -16)));
+
+ swap = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vrlw (swap, operands[2], sixteen));
+
+ one = gen_reg_rtx (V8HImode);
+ convert_move (one, operands[1], 0);
+
+ two = gen_reg_rtx (V8HImode);
+ convert_move (two, operands[2], 0);
+
+ small_swap = gen_reg_rtx (V8HImode);
+ convert_move (small_swap, swap, 0);
+
+ low_product = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vmulouh (low_product, one, two));
+
+ high_product = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero));
+
+ emit_insn (gen_altivec_vslw (high_product, high_product, sixteen));
+
+ emit_insn (gen_addv4si3 (operands[0], high_product, low_product));
+
+ DONE;
+ }")
+;; APPLE LOCAL end 3972875 mainline 2005-04-18
+
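A scalar C sketch of the identity the expander above exploits (illustrative only, not part of the patch): because the result is truncated to 32 bits, the A_high * B_high partial product falls away entirely, leaving the three terms the comment lists.

#include <stdint.h>

uint32_t
mul32_from_halves (uint32_t a, uint32_t b)
{
  uint32_t a_hi = a >> 16, a_lo = a & 0xFFFF;
  uint32_t b_hi = b >> 16, b_lo = b & 0xFFFF;
  /* (a_hi * b_hi) << 32 overflows out of a 32-bit result, so only
     three partial products survive; the expander computes the two
     cross terms with vmsumuhm after rotating one operand by 16. */
  return a_lo * b_lo + ((a_hi * b_lo + b_hi * a_lo) << 16);
}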
;; Fused multiply subtract
(define_insn "altivec_vnmsubfp"
[(set (match_operand:V4SF 0 "register_operand" "=v")
@@ -968,21 +1028,23 @@
"vslo %0,%1,%2"
[(set_attr "type" "vecperm")])
-(define_insn "altivec_vsr<VI_char>"
+; APPLE LOCAL begin mainline 2005-04-05 3972515
+(define_insn "lshr<mode>3"
[(set (match_operand:VI 0 "register_operand" "=v")
- (unspec:VI [(match_operand:VI 1 "register_operand" "v")
- (match_operand:VI 2 "register_operand" "v")] 112))]
+ (lshiftrt:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v") ))]
"TARGET_ALTIVEC"
"vsr<VI_char> %0,%1,%2"
[(set_attr "type" "vecsimple")])
-(define_insn "altivec_vsra<VI_char>"
+(define_insn "ashr<mode>3"
[(set (match_operand:VI 0 "register_operand" "=v")
- (unspec:VI [(match_operand:VI 1 "register_operand" "v")
- (match_operand:VI 2 "register_operand" "v")] 115))]
+ (ashiftrt:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v") ))]
"TARGET_ALTIVEC"
"vsra<VI_char> %0,%1,%2"
[(set_attr "type" "vecsimple")])
+; APPLE LOCAL end mainline 2005-04-05 3972515
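Renaming these patterns from target unspecs to the standard lshr<mode>3/ashr<mode>3 optab names lets target-independent code find them. Below is a loop of the kind that can now map onto vsrw/vsraw (an illustrative sketch, assuming -maltivec and the vectorizer enabled; not part of the patch):

void
shift_arrays (unsigned int *u, int *s, unsigned int n)
{
  int i;
  for (i = 0; i < 1024; i++)
    {
      u[i] >>= n;   /* logical right shift: lshrv4si3 -> vsrw */
      s[i] >>= n;   /* arithmetic right shift: ashrv4si3 -> vsraw */
    }
}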
(define_insn "altivec_vsr"
[(set (match_operand:V4SI 0 "register_operand" "=v")
diff --git a/gcc/config/rs6000/builtin.ops b/gcc/config/rs6000/builtin.ops
new file mode 100644
index 00000000000..a28e35654fc
--- /dev/null
+++ b/gcc/config/rs6000/builtin.ops
@@ -0,0 +1,297 @@
+# APPLE LOCAL file AltiVec
+# ops-to-gp -gcc vec.ops builtin.ops
+# @ betype betype-code type-spelling
+@ @ float BETYPE_R4 float
+@ @ ushort BETYPE_U4 unsigned=short
+@ @ uint BETYPE_U4 unsigned=int
+@ @ ulong BETYPE_U4 unsigned=long
+@ @ immed_u2 U2 0..3
+@ @ immed_u4 U4 0..15
+@ @ immed_s5 I5 -16..15
+@ @ immed_u5 U5 0..31
+@ @ int BETYPE_I4 int
+@ @ long BETYPE_I4 long
+@ @ ptr PTR void=*
+@ @ v16 BETYPE_V16 vec_type
+@ @ void BETYPE_I4 void
+# fetype betype [code [spelling]]
+@ float_ptr ptr i float=*
+@ const_float_ptr ptr i float=*
+@ const_volatile_float_ptr ptr i float=*
+@ int int i
+@ int_ptr ptr i int=*
+@ long_ptr ptr i long=*
+@ const_int_ptr ptr i int=*
+@ const_long_ptr ptr i long=*
+@ const_volatile_int_ptr ptr i int=*
+@ const_volatile_long_ptr ptr i long=*
+@ immed_s5 immed_s5 A
+@ immed_u5 immed_u5 B
+@ immed_u4 immed_u4 C
+@ immed_u2 immed_u2 D
+@ cc24f int j=24=f
+@ cc24fd int j=24=f=d
+@ cc24fr int j=24=f=r
+@ cc24t int j=24=t
+@ cc24td int j=24=t=d
+@ cc24tr int j=24=t=r
+@ cc26f int j=26=f
+@ cc26fd int j=26=f=d
+@ cc26fr int j=26=f=r
+@ cc26t int j=26=t
+@ cc26td int j=26=t=d
+@ cc26tr int j=26=t=r
+@ short_ptr ptr i short=*
+@ signed_char_ptr ptr i signed=char=*
+@ unsigned_char_ptr ptr i unsigned=char=*
+@ unsigned_short_ptr ptr i unsigned=short=*
+@ unsigned_int_ptr ptr i unsigned=int=*
+@ unsigned_long_ptr ptr i unsigned=long=*
+@ const_short_ptr ptr i short=*
+@ const_signed_char_ptr ptr i signed=char=*
+@ const_unsigned_char_ptr ptr i unsigned=char=*
+@ const_unsigned_short_ptr ptr i unsigned=short=*
+@ const_unsigned_int_ptr ptr i unsigned=int=*
+@ const_unsigned_long_ptr ptr i unsigned=long=*
+@ const_volatile_short_ptr ptr i short=*
+@ const_volatile_signed_char_ptr ptr i signed=char=*
+@ const_volatile_unsigned_char_ptr ptr i unsigned=char=*
+@ const_volatile_unsigned_short_ptr ptr i unsigned=short=*
+@ const_volatile_unsigned_int_ptr ptr i unsigned=int=*
+@ const_volatile_unsigned_long_ptr ptr i unsigned=long=*
+@ vec_b16 v16 x vec_b16
+@ vec_b16_load_op v16 xl vec_b16
+@ vec_b16_ptr ptr i vec_b16=*
+@ const_vec_b16_ptr ptr i vec_b16=*
+@ vec_b32 v16 x vec_b32
+@ vec_b32_load_op v16 xl vec_b32
+@ vec_b32_ptr ptr i vec_b32=*
+@ const_vec_b32_ptr ptr i vec_b32=*
+@ vec_b8 v16 x vec_b8
+@ vec_b8_load_op v16 xl vec_b8
+@ vec_b8_ptr ptr i vec_b8=*
+@ const_vec_b8_ptr ptr i vec_b8=*
+@ vec_f32 v16 x vec_f32
+@ vec_f32_load_op v16 xl vec_f32
+@ vec_f32_ptr ptr i vec_f32=*
+@ const_vec_f32_ptr ptr i vec_f32=*
+@ vec_p16 v16 x vec_p16
+@ vec_p16_load_op v16 xl vec_p16
+@ vec_p16_ptr ptr i vec_p16=*
+@ const_vec_p16_ptr ptr i vec_p16=*
+@ vec_s16 v16 x vec_s16
+@ vec_s16_load_op v16 xl vec_s16
+@ vec_s16_ptr ptr i vec_s16=*
+@ const_vec_s16_ptr ptr i vec_s16=*
+@ vec_s32 v16 x vec_s32
+@ vec_s32_load_op v16 xl vec_s32
+@ vec_s32_ptr ptr i vec_s32=*
+@ const_vec_s32_ptr ptr i vec_s32=*
+@ vec_s8 v16 x vec_s8
+@ vec_s8_load_op v16 xl vec_s8
+@ vec_s8_ptr ptr i vec_s8=*
+@ const_vec_s8_ptr ptr i vec_s8=*
+@ vec_u16 v16 x vec_u16
+@ vec_u16_load_op v16 xl vec_u16
+@ vec_u16_ptr ptr i vec_u16=*
+@ const_vec_u16_ptr ptr i vec_u16=*
+@ vec_u32 v16 x vec_u32
+@ vec_u32_load_op v16 xl vec_u32
+@ vec_u32_ptr ptr i vec_u32=*
+@ const_vec_u32_ptr ptr i vec_u32=*
+@ vec_u8 v16 x vec_u8
+@ vec_u8_load_op v16 xl vec_u8
+@ vec_u8_ptr ptr i vec_u8=*
+@ const_vec_u8_ptr ptr i vec_u8=*
+@ void_store_op void s
+@ volatile_void void v
+@ volatile_void_load_op void vl
+@ volatile_void_store_op void vs
+@ volatile_vec_u16 v16 vx vec_u16
+@ char_ptr ptr i char=*
+@ const_char_ptr ptr i char=*
+# @ @ instruction type
+@ @ @ MOP_mfvscr fxu
+@ @ @ MOP_mtvscr fxu
+@ @ @ MOP_dss load
+@ @ @ MOP_dssall load
+@ @ @ MOP_dst load
+@ @ @ MOP_dstst load
+@ @ @ MOP_dststt load
+@ @ @ MOP_dstt load
+@ @ @ MOP_lvebx load
+@ @ @ MOP_lvehx load
+@ @ @ MOP_lvewx load
+@ @ @ MOP_lvsl load
+@ @ @ MOP_lvsr load
+@ @ @ MOP_lvx load
+@ @ @ MOP_lvxl load
+@ @ @ MOP_stvebx store
+@ @ @ MOP_stvehx store
+@ @ @ MOP_stvewx store
+@ @ @ MOP_stvx store
+@ @ @ MOP_stvxl store
+@ @ @ MOP_vaddcuw simple
+@ @ @ MOP_vaddfp fp
+@ @ @ MOP_vaddsbs simple
+@ @ @ MOP_vaddshs simple
+@ @ @ MOP_vaddsws simple
+@ @ @ MOP_vaddubm simple
+@ @ @ MOP_vaddubs simple
+@ @ @ MOP_vadduhm simple
+@ @ @ MOP_vadduhs simple
+@ @ @ MOP_vadduwm simple
+@ @ @ MOP_vadduws simple
+@ @ @ MOP_vand simple
+@ @ @ MOP_vandc simple
+@ @ @ MOP_vavgsb simple
+@ @ @ MOP_vavgsh simple
+@ @ @ MOP_vavgsw simple
+@ @ @ MOP_vavgub simple
+@ @ @ MOP_vavguh simple
+@ @ @ MOP_vavguw simple
+@ @ @ MOP_vcfsx fp
+@ @ @ MOP_vcfux fp
+@ @ @ MOP_vcmpbfp simple
+@ @ @ MOP_vcmpbfpD simple
+@ @ @ MOP_vcmpeqfp simple
+@ @ @ MOP_vcmpeqfpD simple
+@ @ @ MOP_vcmpequb simple
+@ @ @ MOP_vcmpequbD simple
+@ @ @ MOP_vcmpequh simple
+@ @ @ MOP_vcmpequhD simple
+@ @ @ MOP_vcmpequw simple
+@ @ @ MOP_vcmpequwD simple
+@ @ @ MOP_vcmpgefp simple
+@ @ @ MOP_vcmpgefpD simple
+@ @ @ MOP_vcmpgtfp simple
+@ @ @ MOP_vcmpgtfpD simple
+@ @ @ MOP_vcmpgtsb simple
+@ @ @ MOP_vcmpgtsbD simple
+@ @ @ MOP_vcmpgtsh simple
+@ @ @ MOP_vcmpgtshD simple
+@ @ @ MOP_vcmpgtsw simple
+@ @ @ MOP_vcmpgtswD simple
+@ @ @ MOP_vcmpgtub simple
+@ @ @ MOP_vcmpgtubD simple
+@ @ @ MOP_vcmpgtuh simple
+@ @ @ MOP_vcmpgtuhD simple
+@ @ @ MOP_vcmpgtuw simple
+@ @ @ MOP_vcmpgtuwD simple
+@ @ @ MOP_vctsxs fp
+@ @ @ MOP_vctuxs fp
+@ @ @ MOP_vexptefp fp
+@ @ @ MOP_vlogefp fp
+@ @ @ MOP_vmaddfp fp
+@ @ @ MOP_vmaxfp simple
+@ @ @ MOP_vmaxsb simple
+@ @ @ MOP_vmaxsh simple
+@ @ @ MOP_vmaxsw simple
+@ @ @ MOP_vmaxub simple
+@ @ @ MOP_vmaxuh simple
+@ @ @ MOP_vmaxuw simple
+@ @ @ MOP_vmhaddshs complex
+@ @ @ MOP_vmhraddshs complex
+@ @ @ MOP_vminfp simple
+@ @ @ MOP_vminsb simple
+@ @ @ MOP_vminsh simple
+@ @ @ MOP_vminsw simple
+@ @ @ MOP_vminub simple
+@ @ @ MOP_vminuh simple
+@ @ @ MOP_vminuw simple
+@ @ @ MOP_vmladduhm complex
+@ @ @ MOP_vmrghb perm
+@ @ @ MOP_vmrghh perm
+@ @ @ MOP_vmrghw perm
+@ @ @ MOP_vmrglb perm
+@ @ @ MOP_vmrglh perm
+@ @ @ MOP_vmrglw perm
+@ @ @ MOP_vmsummbm complex
+@ @ @ MOP_vmsumshm complex
+@ @ @ MOP_vmsumshs complex
+@ @ @ MOP_vmsumubm complex
+@ @ @ MOP_vmsumuhm complex
+@ @ @ MOP_vmsumuhs complex
+@ @ @ MOP_vmulesb complex
+@ @ @ MOP_vmulesh complex
+@ @ @ MOP_vmuleub complex
+@ @ @ MOP_vmuleuh complex
+@ @ @ MOP_vmulosb complex
+@ @ @ MOP_vmulosh complex
+@ @ @ MOP_vmuloub complex
+@ @ @ MOP_vmulouh complex
+@ @ @ MOP_vnmsubfp fp
+@ @ @ MOP_vnor simple
+@ @ @ MOP_vor simple
+@ @ @ MOP_vperm perm
+@ @ @ MOP_vpkpx perm
+@ @ @ MOP_vpkshss perm
+@ @ @ MOP_vpkshus perm
+@ @ @ MOP_vpkswss perm
+@ @ @ MOP_vpkswus perm
+@ @ @ MOP_vpkuhum perm
+@ @ @ MOP_vpkuhus perm
+@ @ @ MOP_vpkuwum perm
+@ @ @ MOP_vpkuwus perm
+@ @ @ MOP_vrefp fp
+@ @ @ MOP_vrfim fp
+@ @ @ MOP_vrfin fp
+@ @ @ MOP_vrfip fp
+@ @ @ MOP_vrfiz fp
+@ @ @ MOP_vrlb simple
+@ @ @ MOP_vrlh simple
+@ @ @ MOP_vrlw simple
+@ @ @ MOP_vrsqrtefp fp
+@ @ @ MOP_vsel simple
+@ @ @ MOP_vsl simple
+@ @ @ MOP_vslb simple
+@ @ @ MOP_vsldoi perm
+@ @ @ MOP_vslh simple
+@ @ @ MOP_vslo perm_bug
+@ @ @ MOP_vslw simple
+@ @ @ MOP_vspltb perm
+@ @ @ MOP_vsplth perm
+@ @ @ MOP_vspltisb perm
+@ @ @ MOP_vspltish perm
+@ @ @ MOP_vspltisw perm
+@ @ @ MOP_vspltw perm
+@ @ @ MOP_vsr simple
+@ @ @ MOP_vsrab simple
+@ @ @ MOP_vsrah simple
+@ @ @ MOP_vsraw simple
+@ @ @ MOP_vsrb simple
+@ @ @ MOP_vsrh simple
+@ @ @ MOP_vsro perm_bug
+@ @ @ MOP_vsrw simple
+@ @ @ MOP_vsubcuw simple
+@ @ @ MOP_vsubfp fp
+@ @ @ MOP_vsubsbs simple
+@ @ @ MOP_vsubshs simple
+@ @ @ MOP_vsubsws simple
+@ @ @ MOP_vsububm simple
+@ @ @ MOP_vsububs simple
+@ @ @ MOP_vsubuhm simple
+@ @ @ MOP_vsubuhs simple
+@ @ @ MOP_vsubuwm simple
+@ @ @ MOP_vsubuws simple
+@ @ @ MOP_vsum2sws complex
+@ @ @ MOP_vsum4sbs complex
+@ @ @ MOP_vsum4shs complex
+@ @ @ MOP_vsum4ubs complex
+@ @ @ MOP_vsumsws complex
+@ @ @ MOP_vupkhpx perm
+@ @ @ MOP_vupkhsb perm
+@ @ @ MOP_vupkhsh perm
+@ @ @ MOP_vupklpx perm
+@ @ @ MOP_vupklsb perm
+@ @ @ MOP_vupklsh perm
+@ @ @ MOP_vxor simple
+# The vec_abs and vec_abss operations identify their variants with insn_name.
+# Map these into a valid insn code (xfx_perm).
+@ @ @ 1 perm
+@ @ @ 2 perm
+@ @ @ 3 perm
+@ @ @ 4 perm
+@ @ @ 5 perm
+@ @ @ 6 perm
+@ @ @ 7 perm
diff --git a/gcc/config/rs6000/darwin-fallback.c b/gcc/config/rs6000/darwin-fallback.c
index a9ef4a7985e..0815a2a8fdb 100644
--- a/gcc/config/rs6000/darwin-fallback.c
+++ b/gcc/config/rs6000/darwin-fallback.c
@@ -1,3 +1,4 @@
+/* APPLE LOCAL file mainline 2005-03-25 3941951 */
/* Fallback frame-state unwinder for Darwin.
Copyright (C) 2004, 2005 Free Software Foundation, Inc.
@@ -36,9 +37,8 @@
#include "unwind-dw2.h"
#include <stdint.h>
#include <stdbool.h>
+#include <sys/types.h>
#include <signal.h>
-#include <ucontext.h>
-#include <mach/thread_status.h>
typedef unsigned long reg_unit;
@@ -225,6 +225,11 @@ interpret_libc (reg_unit gprs[32], struct _Unwind_Context *context)
}
}
+/* We used to include <ucontext.h> and <mach/thread_status.h>,
+ but they change so much between different Darwin system versions
+ that it's much easier to just write the structures involved here
+ directly. */
+
/* These defines are from the kernel's bsd/dev/ppc/unix_signal.c. */
#define UC_TRAD 1
#define UC_TRAD_VEC 6
@@ -237,6 +242,44 @@ interpret_libc (reg_unit gprs[32], struct _Unwind_Context *context)
#define UC_DUAL 50
#define UC_DUAL_VEC 55
+struct gcc_ucontext
+{
+ int onstack;
+ sigset_t sigmask;
+ void * stack_sp;
+ size_t stack_sz;
+ int stack_flags;
+ struct gcc_ucontext *link;
+ size_t mcsize;
+ struct gcc_mcontext32 *mcontext;
+};
+
+struct gcc_float_vector_state
+{
+ double fpregs[32];
+ uint32_t fpscr_pad;
+ uint32_t fpscr;
+ uint32_t save_vr[32][4];
+ uint32_t save_vscr[4];
+};
+
+struct gcc_mcontext32 {
+ uint32_t dar;
+ uint32_t dsisr;
+ uint32_t exception;
+ uint32_t padding1[5];
+ uint32_t srr0;
+ uint32_t srr1;
+ uint32_t gpr[32];
+ uint32_t cr;
+ uint32_t xer;
+ uint32_t lr;
+ uint32_t ctr;
+ uint32_t mq;
+ uint32_t vrsave;
+ struct gcc_float_vector_state fvs;
+};
+
/* These are based on /usr/include/ppc/ucontext.h and
/usr/include/mach/ppc/thread_status.h, but rewritten to be more
convenient, to compile on Jaguar, and to work around Radar 3712064
@@ -256,17 +299,16 @@ struct gcc_mcontext64 {
uint32_t lr[2];
uint32_t ctr[2];
uint32_t vrsave;
- ppc_float_state_t fs;
- ppc_vector_state_t vs;
+ struct gcc_float_vector_state fvs;
};
#define UC_FLAVOR_SIZE \
- (sizeof (struct mcontext) - sizeof (ppc_vector_state_t))
+ (sizeof (struct gcc_mcontext32) - 33*16)
-#define UC_FLAVOR_VEC_SIZE (sizeof (struct mcontext))
+#define UC_FLAVOR_VEC_SIZE (sizeof (struct gcc_mcontext32))
#define UC_FLAVOR64_SIZE \
- (sizeof (struct gcc_mcontext64) - sizeof (ppc_vector_state_t))
+ (sizeof (struct gcc_mcontext64) - 33*16)
#define UC_FLAVOR64_VEC_SIZE (sizeof (struct gcc_mcontext64))
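The 33*16 subtracted above is the size of the AltiVec tail of gcc_float_vector_state: 32 vector registers of 16 bytes each plus the 16-byte VSCR image, mirroring the ppc_vector_state_t it replaces. A compile-time cross-check one could write (illustrative only, not in the patch):

/* Fails to compile unless the vector tail really is 33*16 bytes. */
typedef char gcc_vec_tail_size_check
  [(sizeof (((struct gcc_float_vector_state *) 0)->save_vr)
    + sizeof (((struct gcc_float_vector_state *) 0)->save_vscr)
    == 33 * 16) ? 1 : -1];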
@@ -278,10 +320,9 @@ static bool
handle_syscall (_Unwind_FrameState *fs, const reg_unit gprs[32],
_Unwind_Ptr old_cfa)
{
- ucontext_t *uctx;
+ struct gcc_ucontext *uctx;
bool is_64, is_vector;
- ppc_float_state_t *float_state;
- ppc_vector_state_t *vector_state;
+ struct gcc_float_vector_state * float_vector_state;
_Unwind_Ptr new_cfa;
int i;
static _Unwind_Ptr return_addr;
@@ -293,16 +334,16 @@ handle_syscall (_Unwind_FrameState *fs, const reg_unit gprs[32],
if (gprs[0] == 0x67 /* SYS_SIGRETURN */)
{
- uctx = (ucontext_t *) gprs[3];
- is_vector = (uctx->uc_mcsize == UC_FLAVOR64_VEC_SIZE
- || uctx->uc_mcsize == UC_FLAVOR_VEC_SIZE);
- is_64 = (uctx->uc_mcsize == UC_FLAVOR64_VEC_SIZE
- || uctx->uc_mcsize == UC_FLAVOR64_SIZE);
+ uctx = (struct gcc_ucontext *) gprs[3];
+ is_vector = (uctx->mcsize == UC_FLAVOR64_VEC_SIZE
+ || uctx->mcsize == UC_FLAVOR_VEC_SIZE);
+ is_64 = (uctx->mcsize == UC_FLAVOR64_VEC_SIZE
+ || uctx->mcsize == UC_FLAVOR64_SIZE);
}
else if (gprs[0] == 0 && gprs[3] == 184)
{
int ctxstyle = gprs[5];
- uctx = (ucontext_t *) gprs[4];
+ uctx = (struct gcc_ucontext *) gprs[4];
is_vector = (ctxstyle == UC_FLAVOR_VEC || ctxstyle == UC_FLAVOR64_VEC
|| ctxstyle == UC_TRAD_VEC || ctxstyle == UC_TRAD64_VEC);
is_64 = (ctxstyle == UC_FLAVOR64_VEC || ctxstyle == UC_TRAD64_VEC
@@ -325,11 +366,10 @@ handle_syscall (_Unwind_FrameState *fs, const reg_unit gprs[32],
/* The context is 64-bit, but it doesn't carry any extra information
for us because only the low 32 bits of the registers are
call-saved. */
- struct gcc_mcontext64 *m64 = (struct gcc_mcontext64 *)uctx->uc_mcontext;
+ struct gcc_mcontext64 *m64 = (struct gcc_mcontext64 *)uctx->mcontext;
int i;
- float_state = &m64->fs;
- vector_state = &m64->vs;
+ float_vector_state = &m64->fvs;
new_cfa = m64->gpr[1][1];
@@ -354,33 +394,32 @@ handle_syscall (_Unwind_FrameState *fs, const reg_unit gprs[32],
}
else
{
- struct mcontext *m = uctx->uc_mcontext;
+ struct gcc_mcontext32 *m = uctx->mcontext;
int i;
- float_state = &m->fs;
- vector_state = &m->vs;
+ float_vector_state = &m->fvs;
- new_cfa = m->ss.r1;
+ new_cfa = m->gpr[1];
- set_offset (CR2_REGNO, &m->ss.cr);
+ set_offset (CR2_REGNO, &m->cr);
for (i = 0; i < 32; i++)
- set_offset (i, &m->ss.r0 + i);
- set_offset (XER_REGNO, &m->ss.xer);
- set_offset (LINK_REGISTER_REGNUM, &m->ss.lr);
- set_offset (COUNT_REGISTER_REGNUM, &m->ss.ctr);
+ set_offset (i, m->gpr + i);
+ set_offset (XER_REGNO, &m->xer);
+ set_offset (LINK_REGISTER_REGNUM, &m->lr);
+ set_offset (COUNT_REGISTER_REGNUM, &m->ctr);
if (is_vector)
- set_offset (VRSAVE_REGNO, &m->ss.vrsave);
+ set_offset (VRSAVE_REGNO, &m->vrsave);
/* Sometimes, srr0 points to the instruction that caused the exception,
and sometimes to the next instruction to be executed; we want
the latter. */
- if (m->es.exception == 3 || m->es.exception == 4
- || m->es.exception == 6
- || (m->es.exception == 7 && !(m->ss.srr1 & 0x10000)))
- return_addr = m->ss.srr0 + 4;
+ if (m->exception == 3 || m->exception == 4
+ || m->exception == 6
+ || (m->exception == 7 && !(m->srr1 & 0x10000)))
+ return_addr = m->srr0 + 4;
else
- return_addr = m->ss.srr0;
+ return_addr = m->srr0;
}
fs->cfa_how = CFA_REG_OFFSET;
@@ -399,14 +438,14 @@ handle_syscall (_Unwind_FrameState *fs, const reg_unit gprs[32],
set_offset (ARG_POINTER_REGNUM, &return_addr);
for (i = 0; i < 32; i++)
- set_offset (32 + i, float_state->fpregs + i);
- set_offset (SPEFSCR_REGNO, &float_state->fpscr);
+ set_offset (32 + i, float_vector_state->fpregs + i);
+ set_offset (SPEFSCR_REGNO, &float_vector_state->fpscr);
if (is_vector)
{
for (i = 0; i < 32; i++)
- set_offset (FIRST_ALTIVEC_REGNO + i, vector_state->save_vr + i);
- set_offset (VSCR_REGNO, vector_state->save_vscr);
+ set_offset (FIRST_ALTIVEC_REGNO + i, float_vector_state->save_vr + i);
+ set_offset (VSCR_REGNO, float_vector_state->save_vscr);
}
return true;
diff --git a/gcc/config/rs6000/darwin-ldouble.c b/gcc/config/rs6000/darwin-ldouble.c
index 86893c961cd..b394d5d99bf 100644
--- a/gcc/config/rs6000/darwin-ldouble.c
+++ b/gcc/config/rs6000/darwin-ldouble.c
@@ -67,7 +67,7 @@ extern long double __gcc_qsub (double, double, double, double);
extern long double __gcc_qmul (double, double, double, double);
extern long double __gcc_qdiv (double, double, double, double);
-#ifdef __ELF__
+#if defined __ELF__ && defined SHARED
/* Provide definitions of the old symbol names to satisfy apps and
shared libs built against an older libgcc. To access the _xlq
symbols an explicit version reference is needed, so these won't
diff --git a/gcc/config/rs6000/darwin-tramp.asm b/gcc/config/rs6000/darwin-tramp.asm
index 64902116ff2..a2e2f4c7434 100644
--- a/gcc/config/rs6000/darwin-tramp.asm
+++ b/gcc/config/rs6000/darwin-tramp.asm
@@ -33,8 +33,6 @@
* executable file might be covered by the GNU General Public License.
*/
-/* APPLE LOCAL mainline throughout this file */
-
#include "darwin-asm.h"
/* Set up trampolines. */
diff --git a/gcc/config/rs6000/darwin.h b/gcc/config/rs6000/darwin.h
index 51a52ef026e..325fb5e761f 100644
--- a/gcc/config/rs6000/darwin.h
+++ b/gcc/config/rs6000/darwin.h
@@ -56,8 +56,9 @@
if (TARGET_64BIT) builtin_define ("__ppc64__"); \
builtin_define ("__POWERPC__"); \
builtin_define ("__NATURAL_ALIGNMENT__"); \
- builtin_define ("__MACH__"); \
- builtin_define ("__APPLE__"); \
+ /* APPLE LOCAL remove __MACH__ and __APPLE__ definitions -- put elsewhere */\
+ /* APPLE LOCAL constant cfstrings */ \
+ SUBTARGET_OS_CPP_BUILTINS (); \
} \
while (0)
@@ -106,6 +107,10 @@ do { \
error ("invalid option %qs", base); \
darwin_fix_and_continue = (base[0] != 'n'); \
} \
+ /* APPLE LOCAL begin longcall */ \
+ if (TARGET_64BIT) \
+ rs6000_longcall_switch = (char *)0; \
+ /* APPLE LOCAL end longcall */ \
} \
if (TARGET_64BIT && ! TARGET_POWERPC64) \
{ \
@@ -126,9 +131,12 @@ do { \
the kernel or some such. */
#define CC1_SPEC "\
+"/* APPLE LOCAL ignore -msse and -msse2 and other x86 options */"\
+%<msse %<msse2 %<march=pentium4 %<mcpu=pentium4 \
%{g: %{!fno-eliminate-unused-debug-symbols: -feliminate-unused-debug-symbols }} \
%{static: %{Zdynamic: %e conflicting code gen style switches are used}}\
-%{!static:%{!mdynamic-no-pic:-fPIC}}"
+"/* APPLE LOCAL -fast and PIC code. */"\
+%{!static:%{!fast:%{!fastf:%{!fastcp:%{!mdynamic-no-pic:-fPIC}}}}}"
#define DARWIN_SUBARCH_SPEC " \
%{m64: ppc64} \
@@ -165,7 +173,8 @@ do { \
#define SUBTARGET_OPTION_TRANSLATE_TABLE \
{ "-ffix-and-continue", "-mfix-and-continue" }, \
{ "-findirect-data", "-mfix-and-continue" }, \
- { "-faltivec", "-maltivec -include altivec.h" }, \
+ /* APPLE LOCAL AltiVec */ \
+ { "-faltivec", "-faltivec -mpim-altivec" }, \
{ "-fno-altivec", "-mno-altivec" }, \
{ "-Waltivec-long-deprecated", "-mwarn-altivec-long" }, \
{ "-Wno-altivec-long-deprecated", "-mno-warn-altivec-long" }
@@ -182,6 +191,24 @@ do { \
#undef RS6000_PIC_OFFSET_TABLE_REGNUM
#define RS6000_PIC_OFFSET_TABLE_REGNUM 31
+/* APPLE LOCAL begin -pg fix */
+/* -pg has a problem which is normally concealed by -fPIC;
+ either -mdynamic-no-pic or -static exposes the -pg problem, causing the
+ crash. FSF gcc for Darwin also has this bug. The problem is that -pg
+ causes several int registers to be saved and restored although they may
+ not actually be used (config/rs6000/rs6000.c:first_reg_to_save()). In the
+ rare case where none of them is actually used, a consistency check fails
+ (correctly). This cannot happen with -fPIC because the PIC register (R31)
+ is always "used" in the sense checked by the consistency check. The
+ easy fix, here, is therefore to mark R31 always "used" whenever -pg is on.
+ A better, but harder, fix would be to improve -pg's register-use
+ logic along the lines suggested by comments in the function listed above. */
+#undef PIC_OFFSET_TABLE_REGNUM
+#define PIC_OFFSET_TABLE_REGNUM ((flag_pic || profile_flag) \
+ ? RS6000_PIC_OFFSET_TABLE_REGNUM \
+ : INVALID_REGNUM)
+/* APPLE LOCAL end -pg fix */
+
/* Pad the outgoing args area to 16 bytes instead of the usual 8. */
#undef STARTING_FRAME_OFFSET
@@ -200,11 +227,35 @@ do { \
#define UNLIKELY_EXECUTED_TEXT_SECTION_NAME \
"__TEXT,__unlikely,regular,pure_instructions"
+/* APPLE LOCAL begin long call hot cold */
+/* The following is used by hot/cold partitioning to determine whether
+ unconditional branches are "long enough" to span the distance between
+ hot and cold sections (otherwise we have to use indirect jumps). It
+ is set based on the -mlongcall flag.
+ If -mlongcall is set, we use the indirect jumps (the macro below gets '0');
+ otherwise we use unconditional branches (the macro below gets '1'). */
+#define HAS_LONG_UNCOND_BRANCH (TARGET_LONG_BRANCH ? 0 : 1)
+/* APPLE LOCAL end long call hot cold */
+
+/* APPLE LOCAL begin long-branch */
/* Define cutoff for using external functions to save floating point.
- Currently on Darwin, always use inline stores. */
+ For Darwin, use the function for more than a few registers. */
+
+/* APPLE LOCAL begin inline FP save/restore (radar 3414605) */
+#undef FP_SAVE_INLINE
+#define FP_SAVE_INLINE(FIRST_REG) \
+(optimize >= 3 \
+|| ((FIRST_REG) > 60 && (FIRST_REG) < 64) \
+|| TARGET_LONG_BRANCH)
+/* APPLE LOCAL end inline FP save/restore (radar 3414605) */
+
+/* Define cutoff for using external functions to save vector registers. */
-#undef FP_SAVE_INLINE
-#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 64)
+#undef VECTOR_SAVE_INLINE
+#define VECTOR_SAVE_INLINE(FIRST_REG) \
+ (((FIRST_REG) >= LAST_ALTIVEC_REGNO - 1 && (FIRST_REG) <= LAST_ALTIVEC_REGNO) \
+ || TARGET_LONG_BRANCH)
+/* APPLE LOCAL end long-branch */
/* Darwin uses a function call if everything needs to be saved/restored. */
#undef WORLD_SAVE_P
@@ -271,11 +322,8 @@ do { \
#undef ASM_COMMENT_START
#define ASM_COMMENT_START ";"
-/* FP save and restore routines. */
-#define SAVE_FP_PREFIX "._savef"
-#define SAVE_FP_SUFFIX ""
-#define RESTORE_FP_PREFIX "._restf"
-#define RESTORE_FP_SUFFIX ""
+/* APPLE LOCAL reduce code size */
+/* Don't define SAVE_FP_PREFIX and friends */
/* This is how to output an assembler line that says to advance
the location counter to a multiple of 2**LOG bytes using the
@@ -292,6 +340,22 @@ do { \
fprintf (FILE, "\t.align32 %d,0x60000000\n", (LOG)); \
} while (0)
+
+/* APPLE LOCAL begin -falign-loops-max-skip */
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+/* This is supported in cctools 465 and later. The macro test
+ above prevents using it in earlier build environments. */
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
+ if ((LOG) != 0) \
+ { \
+ if ((MAX_SKIP) == 0) \
+ fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
+ else \
+ fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \
+ }
+#endif
+/* APPLE LOCAL end -falign-loops-max-skip */
+
/* Generate insns to call the profiler. */
#define PROFILE_HOOK(LABEL) output_profile_hook (LABEL)
@@ -353,30 +417,46 @@ do { \
? GENERAL_REGS \
: (CLASS))
-/* Fix for emit_group_load (): force large constants to be pushed via regs. */
-#define ALWAYS_PUSH_CONSTS_USING_REGS_P 1
-
-/* This now supports a natural alignment mode */
-/* Darwin word-aligns FP doubles but doubleword-aligns 64-bit ints. */
-#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
- (TARGET_ALIGN_NATURAL ? (COMPUTED) : \
- (TYPE_MODE (TREE_CODE (TREE_TYPE (FIELD)) == ARRAY_TYPE \
- ? get_inner_array_type (FIELD) \
- : TREE_TYPE (FIELD)) == DFmode \
- ? MIN ((COMPUTED), 32) : (COMPUTED)))
-
-/* Darwin increases natural record alignment to doubleword if the first
- field is an FP double while the FP fields remain word aligned. */
-#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
- ((TREE_CODE (STRUCT) == RECORD_TYPE \
- || TREE_CODE (STRUCT) == UNION_TYPE \
- || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
- && TARGET_ALIGN_NATURAL == 0 \
- ? rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \
- : (TREE_CODE (STRUCT) == VECTOR_TYPE \
- && ALTIVEC_VECTOR_MODE (TYPE_MODE (STRUCT))) \
- ? MAX (MAX ((COMPUTED), (SPECIFIED)), 128) \
- : MAX ((COMPUTED), (SPECIFIED)))
+/* APPLE LOCAL begin Macintosh alignment 2002-2-26 --ff */
+/* This now supports the Macintosh power, mac68k, and natural
+ alignment modes. It now has one more parameter than the standard
+ version of the ADJUST_FIELD_ALIGN macro.
+
+ The macro works as follows: We use the computed alignment of the
+ field if we are in the natural alignment mode or if the field is
+ a vector. Otherwise, if we are in the mac68k alignment mode, we
+ use the minimum of the computed alignment and 16 (pegging at
+ 2-byte alignment). If we are in the power mode, we peg at 32
+ (word alignment) unless it is the first field of the struct, in
+ which case we use the computed alignment. */
+#undef ADJUST_FIELD_ALIGN
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED, FIRST_FIELD_P) \
+ (TARGET_ALIGN_NATURAL ? (COMPUTED) : \
+ (((COMPUTED) == RS6000_VECTOR_ALIGNMENT) \
+ ? RS6000_VECTOR_ALIGNMENT \
+ : (MIN ((COMPUTED), \
+ (TARGET_ALIGN_MAC68K ? 16 \
+ : ((FIRST_FIELD_P) ? (COMPUTED) \
+ : 32))))))
+
+#undef ROUND_TYPE_ALIGN
+/* Macintosh alignment modes require more complicated handling
+ of alignment, so we replace the macro with a call to a
+ out-of-line function. */
+union tree_node;
+extern unsigned round_type_align (union tree_node*, unsigned, unsigned); /* rs6000.c */
+#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
+ round_type_align(STRUCT, COMPUTED, SPECIFIED)
+/* APPLE LOCAL end Macintosh alignment 2002-2-26 --ff */
+
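A concrete illustration of the three field-alignment modes the macros above distinguish (offsets are illustrative, assuming a typical 32-bit PowerPC ABI; not part of the patch):

struct example
{
  char c;
  double d;   /* offset 8 under natural alignment,
                 offset 4 under power alignment (not the first field),
                 offset 2 under mac68k alignment */
};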
+/* APPLE LOCAL begin alignment */
+/* Make sure local alignments come from the type node, not the mode;
+ mode-based alignments are wrong for vectors. */
+#undef LOCAL_ALIGNMENT
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
+ (MIN (BIGGEST_ALIGNMENT, \
+ MAX ((unsigned) ALIGN, TYPE_ALIGN (TYPE))))
+/* APPLE LOCAL end alignment */
/* Specify padding for the last element of a block move between
registers and memory. FIRST is nonzero if this is the only
@@ -388,6 +468,17 @@ do { \
support 64 bit PowerPC either, so this just keeps things happy. */
#define DOUBLE_INT_ASM_OP "\t.quad\t"
+/* APPLE LOCAL begin branch cost */
+#undef BRANCH_COST
+/* Better code is generated by saying conditional branches take 1 tick. */
+#define BRANCH_COST 1
+/* APPLE LOCAL end branch cost */
+
+/* APPLE LOCAL begin indirect calls in R12 */
+/* Address of indirect call must be computed here */
+#define MAGIC_INDIRECT_CALL_REG 12
+/* APPLE LOCAL end indirect calls in R12 */
+
/* For binary compatibility with 2.95; Darwin C APIs use bool from
stdbool.h, which was an int-sized enum in 2.95. Users can explicitly
choose to have sizeof(bool)==1 with the -mone-byte-bool switch. */
@@ -397,6 +488,15 @@ extern const char *darwin_one_byte_bool;
#undef REGISTER_TARGET_PRAGMAS
#define REGISTER_TARGET_PRAGMAS DARWIN_REGISTER_TARGET_PRAGMAS
+/* APPLE LOCAL begin mainline 2005-04-11 */
+#undef LIBGCC_SPEC
+#undef REAL_LIBGCC_SPEC
+#define REAL_LIBGCC_SPEC \
+ "%{static|static-libgcc:-lgcc -lgcc_eh; \
+ :%{shared-libgcc|Zdynamiclib:%{m64:-lgcc_s_ppc64;:-lgcc_s} -lgcc; \
+ :-lgcc -lgcc_eh}}"
+/* APPLE LOCAL end mainline 2005-04-11 */
+
#ifdef IN_LIBGCC2
#include <stdbool.h>
#endif
@@ -405,10 +505,12 @@ extern const char *darwin_one_byte_bool;
#define HAS_MD_FALLBACK_FRAME_STATE_FOR 1
+/* APPLE LOCAL begin mainline to be accessed, 5 nops */
/* True, iff we're generating fast turn around debugging code. When
- true, we arrange for function prologues to start with 4 nops so
- that gdb may insert code to redirect them, and for data to accessed
- indirectly. The runtime uses this indirection to forward
+ true, we arrange for function prologues to start with 5 nops so
+ that gdb may insert code to redirect them, and for data to be
+ accessed indirectly. The runtime uses this indirection to forward
references for data to the original instance of that data. */
+/* APPLE LOCAL end mainline to be accessed, 5 nops */
#define TARGET_FIX_AND_CONTINUE (darwin_fix_and_continue)
diff --git a/gcc/config/rs6000/darwin.md b/gcc/config/rs6000/darwin.md
index a0499c5c7aa..0e69db2df83 100644
--- a/gcc/config/rs6000/darwin.md
+++ b/gcc/config/rs6000/darwin.md
@@ -449,3 +449,61 @@ Boston, MA 02111-1307, USA. */
[(set_attr "type" "branch")
(set_attr "length" "4")])
+/* APPLE LOCAL begin 64-bit */
+(define_insn "*save_fpregs_with_label_di"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:DI 1 "register_operand" "=l"))
+ (use (match_operand:DI 2 "call_operand" "s"))
+ (use (match_operand:DI 3 "" ""))
+ (set (match_operand:DF 4 "memory_operand" "=m")
+ (match_operand:DF 5 "gpc_reg_operand" "f"))])]
+ "TARGET_64BIT"
+ "*
+#if TARGET_MACHO
+ const char *picbase = machopic_function_base_name ();
+ operands[3] = gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (picbase, -1));
+#endif
+ return \"bl %z2\\n%3:\";
+"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*save_vregs_di"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:DI 1 "register_operand" "=l"))
+ (use (match_operand:DI 2 "call_operand" "s"))
+ (set (match_operand:V4SI 3 "any_operand" "=m")
+ (match_operand:V4SI 4 "register_operand" "v"))])]
+ "TARGET_64BIT"
+ "bl %z2"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*restore_vregs_di"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:DI 1 "register_operand" "=l"))
+ (use (match_operand:DI 2 "call_operand" "s"))
+ (clobber (match_operand:DI 3 "gpc_reg_operand" "=r"))
+ (set (match_operand:V4SI 4 "register_operand" "=v")
+ (match_operand:V4SI 5 "any_operand" "m"))])]
+ "TARGET_64BIT"
+ "bl %z2")
+
+(define_insn "*save_vregs_with_label_di"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:DI 1 "register_operand" "=l"))
+ (use (match_operand:DI 2 "call_operand" "s"))
+ (use (match_operand:DI 3 "" ""))
+ (set (match_operand:V4SI 4 "any_operand" "=m")
+ (match_operand:V4SI 5 "register_operand" "v"))])]
+ "TARGET_64BIT"
+ "*
+#if TARGET_MACHO
+ const char *picbase = machopic_function_base_name ();
+ operands[3] = gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (picbase, -1));
+#endif
+ return \"bl %z2\\n%3:\";
+"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+/* APPLE LOCAL end 64-bit */
diff --git a/gcc/config/rs6000/host-darwin.c b/gcc/config/rs6000/host-darwin.c
index 599e30603d6..f16a3ddb2b0 100644
--- a/gcc/config/rs6000/host-darwin.c
+++ b/gcc/config/rs6000/host-darwin.c
@@ -1,5 +1,7 @@
/* Darwin/powerpc host-specific hook definitions.
- Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ APPLE LOCAL begin mainline 2005-04-06 4071679
+ Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+ APPLE LOCAL end mainline 2005-04-06 4071679
This file is part of GCC.
@@ -23,11 +25,14 @@
#include "coretypes.h"
#include <signal.h>
#include <sys/ucontext.h>
-#include <sys/mman.h>
+/* APPLE LOCAL mainline 2005-04-06 4071679 */
+/* Delete mman.h */
#include "hosthooks.h"
#include "hosthooks-def.h"
#include "toplev.h"
#include "diagnostic.h"
+/* APPLE LOCAL mainline 2005-04-06 4071679 */
+#include "config/host-darwin.h"
static void segv_crash_handler (int);
static void segv_handler (int, siginfo_t *, void *);
@@ -137,65 +142,7 @@ darwin_rs6000_extra_signals (void)
fatal_error ("While setting up signal handler: %m");
}
-#undef HOST_HOOKS_GT_PCH_GET_ADDRESS
-#define HOST_HOOKS_GT_PCH_GET_ADDRESS darwin_rs6000_gt_pch_get_address
-#undef HOST_HOOKS_GT_PCH_USE_ADDRESS
-#define HOST_HOOKS_GT_PCH_USE_ADDRESS darwin_rs6000_gt_pch_use_address
-
-/* Yes, this is really supposed to work. */
-static char pch_address_space[1024*1024*1024] __attribute__((aligned (4096)));
-
-/* Return the address of the PCH address space, if the PCH will fit in it. */
-
-static void *
-darwin_rs6000_gt_pch_get_address (size_t sz, int fd ATTRIBUTE_UNUSED)
-{
- if (sz <= sizeof (pch_address_space))
- return pch_address_space;
- else
- return NULL;
-}
-
-/* Check ADDR and SZ for validity, and deallocate (using munmap) that part of
- pch_address_space beyond SZ. */
-
-static int
-darwin_rs6000_gt_pch_use_address (void *addr, size_t sz, int fd, size_t off)
-{
- const size_t pagesize = getpagesize();
- void *mmap_result;
- int ret;
-
- if ((size_t)pch_address_space % pagesize != 0
- || sizeof (pch_address_space) % pagesize != 0)
- abort ();
-
- ret = (addr == pch_address_space && sz <= sizeof (pch_address_space));
- if (! ret)
- sz = 0;
-
- /* Round the size to a whole page size. Normally this is a no-op. */
- sz = (sz + pagesize - 1) / pagesize * pagesize;
-
- if (munmap (pch_address_space + sz, sizeof (pch_address_space) - sz) != 0)
- fatal_error ("couldn't unmap pch_address_space: %m\n");
-
- if (ret)
- {
- mmap_result = mmap (addr, sz,
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
- fd, off);
-
- /* The file might not be mmap-able. */
- ret = mmap_result != (void *) MAP_FAILED;
-
- /* Sanity check for broken MAP_FIXED. */
- if (ret && mmap_result != addr)
- abort ();
- }
-
- return ret;
-}
-
+/* APPLE LOCAL mainline 2005-04-06 4071679 */
+/* Delete PCH functions & macros */
const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/gcc/config/rs6000/ops-to-gp b/gcc/config/rs6000/ops-to-gp
new file mode 100755
index 00000000000..becb406749b
--- /dev/null
+++ b/gcc/config/rs6000/ops-to-gp
@@ -0,0 +1,620 @@
+#!/bin/sh
+# APPLE LOCAL file AltiVec
+# ops-to-gp -gcc vec.ops builtin.ops
+# Creates vec.h used by rs6000.c
+
+arg0=`basename $0`
+err() {
+ echo "$arg0: $*" 1>&2
+ exit 2
+}
+
+if [ $# -eq 0 ] ; then
+ echo "Usage: $arg0 [ -mcc | -gcc ] builtin-ops ..." 1>&2
+ exit 1
+fi
+
+MCC=1
+GCC=0
+suffix="gp"
+if [ "$1" = "-mcc" ] ; then
+ shift;
+elif [ "$1" = "-gcc" ] ; then
+ GCC=1
+ MCC=0
+ suffix="h"
+ shift;
+fi
+
+output=`basename $1 .ops`
+gperf="gperf -G -a -o -k1-15 -p -t -D -T -N Is_Builtin_Function $output.gp";
+
+# Lines in the ops file have the form
+# @ @ betype betype-code type-spelling
+# @ fetype betype [code]
+# @ @ @ instruction type
+# generic op1 op2 ... opn = result specific when configure [addressible
+# [instruction [const_ptr_ok [volatile_ptr_ok [transform [predicate]]]]]]
+
+# Sort the ops file to put it in a canonical order.
+sort -u $* | \
+
+# Add specific function uid's, make generic functions from specific
+# functions, validate the types used, compute default parameters, and
+# compute parts of the default transform and predicate functions.
+awk 'BEGIN {
+ i = 0
+ EQ = i++
+ RESULT = i++
+ SPECIFIC = i++
+ WHEN = i++
+ CONFIGURED = i++
+ ADDRESSIBLE = i++
+ INSTRUCTION = i++
+ CONST_PTR_OK = i++
+ VOLATILE_PTR_OK = i++
+ TRANSFORM = i++
+ PREDICATE = i++
+ n_lines = 1;
+ tree[3] = "Make_Folded_4tree";
+ tree[2] = "Make_Folded_3tree";
+ tree[1] = "Make_Folded_Btree";
+ tree[0] = "Make_Utree";
+ optimize["vec_sub"] = 1;
+ optimize["vec_subs"] = 1;
+ optimize["vec_xor"] = 1;
+ optimize["vec_andc"] = 1;
+ optimize["vec_avg"] = 2;
+ optimize["vec_or"] = 2;
+ optimize["vec_and"] = 2;
+ optimize["vec_max"] = 2;
+ optimize["vec_min"] = 2;
+ optimize["vec_sld"] = 3;
+ optimize["vec_splat_s8"] = 4;
+ optimize["vec_splat_s16"] = 5;
+ optimize["vec_splat_s32"] = 6;
+ optimize["vec_splat_u8"] = 4;
+ optimize["vec_splat_u16"] = 5;
+ optimize["vec_splat_u32"] = 6;
+ optimize["vec_cmpeq"] = 7;
+ optimize["vec_lvsl"] = 8;
+ optimize["vec_lvsr"] = 9;
+ # These operations need additional transformation. Key off the
+ # optimize attribute to identify them.
+ optimize["vec_cmplt"] = 10;
+ optimize["vec_cmple"] = 10;
+ optimize["vec_abs"] = 11;
+ optimize["vec_abss"] = 11;
+ }
+ function no_type(t) {
+ printf "%% Error: type %s not declared.\n", t;
+ status = 1;
+ exit;
+ }
+ # Record the type.
+ $1 == "@" {
+ if ($2 == "@") {
+ if ($3 == "@") {
+ # Definition of an instruction.
+ insn_type[$4] = $5; # type
+ } else {
+ # Definition of a betype.
+ becode[$3] = $4; # betype-code
+ bespell[$3] = $5; # type-spelling
+ gsub(/\=/, " ", bespell[$3]);
+ }
+ } else {
+ # Definition of a fetype.
+ print $0;
+ if (!becode[$3]) no_type($3); # Must have defined the betype.
+ betype[$2] = $3; # betype;
+ if (NF == 3)
+ code[$2] = "";
+ else
+ code[$2] = $4; # code
+ }
+ }
+ function no_equal(i,l) {
+ printf "%% Syntax error %d: %s\n", i, l;
+ status = 1;
+ exit;
+ }
+ function error(f,a) {
+ printf( ("%% error: " f), a);
+ status = 1;
+ exit;
+ }
+ # Ignore comment lines.
+ $1 != "#" && $1 != "@" {
+ # Generate the signature of the specific function, the predicate,
+ # the transform, the arguments to the transform function, the
+ # arguments to the predicate function, and the spelling of the
+ # function type.
+ signature = "";
+ predicate = "";
+ transform = "";
+ insn_code = "";
+ transform_args = "";
+ predicate_args = "";
+ function_type = "";
+ # First, consider the parameter types.
+ for (i = 2; $i != "=" && i < NF; i++) {
+ if ($i != "...") {
+ if (!betype[$i]) no_type($i);
+ signature = (signature " " $i);
+ predicate = (predicate "_" betype[$i]);
+ transform = (transform code[$i]);
+ transform_args = (transform_args ", ND_kid(t," i-1 ")");
+ predicate_args = (predicate_args " " becode[betype[$i]]);
+ if (function_type)
+ function_type = (function_type ", " bespell[betype[$i]]);
+ else
+ function_type = bespell[betype[$i]];
+ }
+ }
+ constraints = (transform "@");
+ # Check the syntax of the ops file.
+ if ($i != "=" || NF > i+PREDICATE || NF < i+CONFIGURE) no_equal(i,$0);
+ if (!betype[$(i+RESULT)]) no_type($(i+RESULT));
+ # Incorporate the result type.
+ if (i == 2) {
+ predicate = "_void";
+ function_type = "void";
+ }
+ signature = ($(i+SPECIFIC) signature);
+ predicate = sprintf("is_%s_func%s", betype[$(i+RESULT)], predicate);
+ predicate_args = (becode[betype[$(i+RESULT)]] predicate_args);
+ function_type = sprintf("(%s (*)(%s))", bespell[betype[$(i+RESULT)]], \
+ function_type);
+ if (substr(code[$(i+RESULT)], 1, 1) == "j") {
+ # Handle a jump asm. The code is expedted to be
+ # j={cc-bit-num}={cc-bit-value}[={r|d}]. The operation must have
+ # one operand if the code d is used and two operands otherwise.
+ # The transform function can implement the r code by reversing the
+ # two operands. In all cases, the first operand is a computed
+ # constant encoding both the bit number and the test.
+ n = split(code[$(i+RESULT)], jmp, "=");
+ if (jmp[n] == "d" && i != 3) error("%d operands", i-2);
+ if (jmp[n] != "d" && i != 4) error("%d operands", i-2);
+ if (jmp[n] == "r")
+ transform_args = ", ND_kid(t,2), ND_kid(t,1)";
+ transform_args = sprintf("%s(OP_VCMP%s%s", tree[i-2], \
+ toupper(jmp[3]), transform_args);
+ if (jmp[n] == "r")
+ transform = ("r" transform);
+ insn_code = sprintf("CODE_FOR_j_%d_%s_f%s", jmp[2], jmp[3], \
+ transform);
+ transform = sprintf("transform_j_%d_%s_f%s", jmp[2], jmp[3], \
+ transform);
+ } else {
+ transform_args = sprintf("%s(OP_%sASM%s%s", tree[i-2], \
+ toupper(code[$(i+RESULT)]), \
+ toupper(transform), transform_args);
+ insn_code = sprintf("CODE_FOR_%sf%s", code[$(i+RESULT)], transform);
+ transform = sprintf("transform_%sf%s", code[$(i+RESULT)], transform);
+ }
+ # Give a unique id to the signature
+ if (count[signature] == 0)
+ count[signature] = ++uid[$(i+SPECIFIC)];
+
+ # Compute the default instruction name
+ nf = split($(i+SPECIFIC), part, "_");
+ instruction = ("MOP_" part[nf]);
+
+ # Compute the insn_code, but use the instruction override if given.
+ if (NF >= i+INSTRUCTION)
+ instruction = $(i+INSTRUCTION);
+ if (insn_type[instruction])
+ insn_code = (insn_code "_" insn_type[instruction]);
+
+ # Allow the user to override the addressibility, instruction,
+ # const_ptr_ok, volatile_ptr_ok, transform, and predicate.
+ if (NF >= i+ADDRESSIBLE)
+ addressible = "";
+ else
+ addressible = "FALSE";
+
+ if (NF >= i+INSTRUCTION)
+ instruction = "";
+ else if (substr($1, 1, 4) == "vec_")
+ print "@ @3", instruction;
+
+ if (NF >= i+CONST_PTR_OK)
+ const_ptr_ok = "";
+ else
+ const_ptr_ok = "FALSE";
+
+ if (NF >= i+VOLATILE_PTR_OK)
+ volatile_ptr_ok = "";
+ else
+ volatile_ptr_ok = "FALSE";
+
+ if (NF >= i+TRANSFORM)
+ transform = "";
+ else
+ print "@ @1", transform, transform_args;
+
+ if (NF >= i+PREDICATE)
+ predicate = "";
+ else
+ print "@ @2", i-2, predicate, predicate_args, function_type;
+
+ if (optimize[$1])
+ optimize_method = optimize[$1];
+ else
+ optimize_method = "0";
+
+ # Record the line, addressibility, instruction, transform,
+ # predicate, and unique id.
+ line[n_lines++] = ($0 " " addressible " " instruction " " \
+ const_ptr_ok " " volatile_ptr_ok " " transform " " \
+ predicate " " insn_code " " constraints " " \
+ optimize_method " " count[signature]);
+ }
+ END {
+ if (status) exit;
+ # generic op1 op2 ... opn = result specific when configured
+ # addressable instruction const_ptr_ok volatile_ptr_ok
+ # transform predicate insn_code constraints optimize uid
+ SPECIFIC = 12
+ for (i = 1; i < n_lines; i++) {
+ nf = split(line[i], part);
+ specific = part[nf-SPECIFIC];
+
+ # Print the generic form.
+ printf "%s", part[1];
+ for (j = 2; j <= nf-SPECIFIC; j++) printf " %s", part[j];
+ if (uid[specific] > 1) printf ":%d", part[nf];
+ while (j < nf) printf " %s", part[j++];
+ printf "\n";
+
+ # Print the specific form.
+ printf "%s", specific;
+ for (j = 2; j <= nf-SPECIFIC; j++) printf " %s", part[j];
+ if (uid[specific] > 1) printf ":%d", part[nf];
+ while (j < nf) printf " %s", part[j++];
+ printf "\n";
+ }
+ }' | \
+
+# Strip out load and store qualifiers.
+sed -e 's/_load_op//g' -e 's/_store_op//g' | \
+
+# Sort the processed file and eliminate duplicates.
+sort -u | \
+
+# Append the count of each generic function to each line.
+awk 'function push() {
+ if (num)
+ for (i = 0; i < num; i++)
+ print line[i], num;
+ num = 0;
+ }
+ $1 == "@" {
+ print $0;
+ }
+ $1 != "@" {
+ if (last != $1)
+ push();
+ last = $1;
+ line[num++] = $0;
+ }
+ END {
+ push();
+ }' | \
+
+# Now compute the gperf input file.
+# Lines now have a fixed format
+# generic op1 ... opn = result specific instruction when configured
+# addressible const_ptr_ok volatile_ptr_ok transform predicate
+# insn_code constraints optimize count
+awk 'BEGIN {
+ MCC = '$MCC'
+ GCC = '$GCC'
+ i = 0;
+ COUNT = i++
+ OPTIMIZE = i++
+ CONSTRAINTS = i++
+ INSN_CODE = i++
+ PREDICATE = i++
+ TRANSFORM = i++
+ VOLATILE_PTR_OK = i++
+ CONST_PTR_OK = i++
+ INSTRUCTION = i++
+ ADDRESSIBLE = i++
+ CONFIGURED = i++
+ WHEN = i++
+ SPECIFIC = i++
+ RESULT = i++
+ EQ = i++
+ OPN = i++
+ NARGS = i++
+ if (MCC) {
+ print "%{";
+ print "/* Command-line: '"$gperf"' */";
+ MAXARGS = 5
+ }
+ if (GCC)
+ MAXARGS = 3
+ }
+ function write_test(tree, type, num) {
+ if (type == "PTR") {
+ printf "\n && TY_kind(%s) == KIND_POINTER", tree;
+ } else if (type == "I5") {
+ printf "\n && is_integer_type(%s)", tree;
+ printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num;
+ printf "\n && ((UINT32)Targ_To_Host(tc) + 16) < 32";
+ } else if (type == "U5") {
+ printf "\n && is_integer_type(%s)", tree;
+ printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num;
+ printf "\n && (UINT32)Targ_To_Host(tc) < 32";
+ } else if (type == "U4") {
+ printf "\n && is_integer_type(%s)", tree;
+ printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num;
+ printf "\n && (UINT32)Targ_To_Host(tc) < 16";
+ } else if (type == "U2") {
+ printf "\n && is_integer_type(%s)", tree;
+ printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num;
+ printf "\n && (UINT32)Targ_To_Host(tc) < 4";
+ } else if (type == "BETYPE_U4" || type == "BETYPE_I4") {
+ printf "\n && is_integer_type(%s)", tree;
+ } else {
+ printf "\n && Similar_Types(%s,", tree;
+ printf "\n\t\t Be_Type_Tbl(%s), IGNORE_QUALIFIERS)", type;
+ }
+ }
+ $1 == "@" {
+ if (MCC) {
+ if ($2 == "@1") {
+ # Write the predicate function from the given parameters.
+ # The format is:
+ # @ @1 transform_ifii Make_3tree(OP_IASMII, ND_kid(t,1), ND_kid(t,2)
+ print "";
+ print "/*ARGSUSED*/";
+ print "static void";
+ print $3 "(ND *func, ND *parent, ND *t, struct builtin *self)";
+ print "{";
+ printf " *t = *%s", $4;
+ for (i = 5; i <= NF; i++) printf " %s", $i;
+ print ",";
+ if (split($3,jmp,"_") == 5 && jmp[2] == "j")
+ printf "\t\t MK_I4CONST_ND((self->data << 5) + %d));\n", \
+ jmp[3];
+ else
+ print "\t\t MK_I4CONST_ND(self->data));";
+
+ print " Is_True(self->data > 0, (\"No implementation for %s\", self->name));";
+ print "}";
+ } else if ($2 == "@2") {
+ # Write the transform function from the given parameters.
+ # The format is:
+ # @ @2 2 is_int_func_int_int BETYPE_I4 BETYPE_I4 BETYPE_I4
+ # (int (*)(int, int))
+ print "";
+ print "/*ARGSUSED*/";
+ print "static BOOL";
+ print $4 "(ND *func, ND *parent, ND *t, struct builtin *self)";
+ print "{";
+ print " TCON tc;";
+ printf " if (ND_nkids(t) == %d", $3+1;
+ write_test("ST_type(ND_dec(func))", $5, "");
+ for (i = 1; i <= $3; i++) {
+ printf "\n && ND_name(ND_kid(t,%d)) == TO_VAL", i;
+ write_test(sprintf("The_Tree_Type(ND_kid(t,%d))", i), $(i+5), i);
+ }
+ print ")";
+ print " return TRUE;";
+ print " Error_Prt_Line (ND_linenum(t), ec_builtin_function_type, self->name,";
+ i = $3+6;
+ printf "\t\t \"%s", $i;
+ while (++i <= NF) printf " %s", $i;
+ print "\");";
+ print " return FALSE;";
+ print "}";
+ } else if ($2 == "@3") {
+ if (once++ == 0) printf "\n#ifndef HAVE_ALTIVEC\n";
+ printf "#define %s -1\n", $3;
+ } else {
+ if (once && twice++ == 0) printf "#endif /* HAVE_ALTIVEC */\n\n";
+ printf "extern struct a_type *T_%s;\n", $2;
+ }
+ }
+ next;
+ }
+ $1 == "%" {
+ print $0;
+ status = 1;
+ exit;
+ }
+ {
+ # Compute the signature of the generic function.
+ signature=$1;
+ for (i = 2; i <= NF-OPN; i++) {
+ if ($i != "...")
+ signature=(signature " " $i);
+ }
+
+ # Ensure that the signature is unique.
+ if (signature_line[signature]) {
+ print "Ambiguous signatures:";
+ print $0;
+ print line[signature_line[signature]];
+ }
+ signature_line[signature] = n_lines;
+
+ # Require that overloaded functions have the same attributes:
+ # number of arguments, when, configured, and addressible.
+ if (same_arg_count[$1] && same_arg_count[$1] != NF)
+ printf "%% number of arguments for %s varies: %d and %d\n", \
+ $1, NF-NARGS, same_arg_count[$1]-NARGS;
+ same_arg_count[$1] = NF;
+
+ if (same_when[$1] && same_when[$1] != $(NF-WHEN))
+ printf "%% when for %s varies: %s and %s\n", \
+ $1, $(NF-WHEN), same_when[$1];
+ same_when[$1] = $(NF-WHEN);
+
+ if (same_configured[$1] && same_configured[$1] != $(NF-CONFIGURED))
+ printf "%% configured for %s varies: %s and %s\n", \
+ $1, $(NF-CONFIGURED), same_configured[$1];
+ same_configured[$1] = $(NF-CONFIGURED);
+
+ if (same_addressible[$1] && same_addressible[$1] != $(NF-ADDRESSIBLE))
+ printf "%% addressible for %s varies: %s and %s\n", \
+ $1, $(NF-ADDRESSIBLE), same_addressible[$1];
+ else if (same_addressible[$1] && same_addressible[$1] != "FALSE")
+ printf "%% Overloaded function %s is addressible\n", $1
+ same_addressible[$1] = $(NF-ADDRESSIBLE);
+
+ # Record the line.
+ line[n_lines++] = $0;
+ }
+ function push(fcn, n) {
+ if (last) printf "};\n";
+ # Gcc3: declare as arrays of const pointers
+ if (fcn) printf "static const struct builtin *const O_%s[%d] = {\n", fcn, n;
+ last = fcn;
+ }
+ function mangle(name) {
+ if (split(name, names, ":") == 1)
+ return ("B_" names[1]);
+ return ("B" names[2] "_" names[1]);
+ }
+ END {
+ if (status) exit;
+
+ # Gcc3: Mark file as Apple local
+ printf "/* APPLE LOCAL file AltiVec */\n";
+ printf "/* This file is generated by ops-to-gp. Do not edit. */\n\n";
+ printf "/* To regenerate execute:\n";
+ printf " ops-to-gp -gcc vec.ops builtin.ops\n";
+ printf " with the current directory being gcc/config/rs6000. */\n\n";
+
+ # Output the description of each specific function.
+ uid = 0;
+ if (MCC) print "";
+ for (i = 0; i < n_lines; i++) {
+ nf = split(line[i], part);
+ fcn = part[nf-SPECIFIC];
+ if (!done[fcn]) {
+ printf "static const struct builtin %s = {", mangle(fcn);
+ if (GCC) printf " {";
+ ellipsis = 1;
+ for (j = 2; j <= nf-OPN; j++)
+ if (part[j] != "...") {
+ printf " &T_%s,", part[j];
+ } else {
+ ellipsis = -1;
+ printf " NULL,";
+ }
+ while (j++ <= MAXARGS+1)
+ printf " NULL,";
+ instruction = part[nf-INSTRUCTION];
+ if (substr(instruction, 1, 4) == "MOP_")
+ instruction = substr(instruction, 5);
+ if (substr(instruction, length(instruction)) == "D")
+ instruction = (substr(instruction, 1, length(instruction) - 1) ".");
+ # Gcc3: Prefix each specific instruction with a "*"
+ if (match (instruction, "^[a-zA-Z]") > 0)
+ instruction = "*" instruction;
+ if (GCC) printf " },";
+ if (GCC) printf " \"%s\",", substr(part[nf-CONSTRAINTS], 1, length(part[nf-CONSTRAINTS]) - 1);
+ printf " &T_%s,", part[nf-RESULT];
+ if (MCC) printf " \"%s\",", part[nf-SPECIFIC];
+ printf " %d,", ellipsis * (nf - NARGS);
+ if (MCC) {
+ printf " %s,", part[nf-WHEN];
+ printf " %s,", part[nf-ADDRESSIBLE];
+ printf " %s,", part[nf-CONST_PTR_OK];
+ printf " %s,", part[nf-VOLATILE_PTR_OK];
+ printf " %s,", part[nf-CONFIGURED];
+ printf " %s,", part[nf-INSTRUCTION];
+ printf " %s,", part[nf-TRANSFORM];
+ printf " %s", part[nf-PREDICATE];
+ } else if (GCC) {
+ printf " %s,", part[nf-CONST_PTR_OK];
+ printf " %s,", part[nf-VOLATILE_PTR_OK];
+ printf " %s,", part[nf-OPTIMIZE];
+ printf " \"%s\",", part[nf-SPECIFIC];
+ printf " \"%s\",", instruction;
+ printf " %s,", part[nf-INSN_CODE];
+ printf " B_UID(%d)", uid++;
+ }
+ printf " };\n";
+ }
+ done[fcn] = 1;
+ }
+
+ if (GCC) printf "#define LAST_B_UID B_UID(%d)\n", uid;
+
+ if (GCC) {
+ # Output the description of each specific function.
+ print "";
+ uid = 0;
+ for (i in done)
+ done[i] = "";
+ print "const struct builtin * const Builtin[] = {"
+ for (i = 0; i < n_lines; i++) {
+ nf = split(line[i], part);
+ fcn = part[nf-SPECIFIC];
+ if (!done[fcn]) {
+ printf " &%s,\n", mangle(fcn);
+ }
+ done[fcn] = 1;
+ }
+ print "};"
+ }
+
+ # Output the overload tables for each generic function.
+ print "";
+ for (i = 0; i < n_lines; i++) {
+ nf = split(line[i], part);
+ fcn = part[1];
+ if (last != fcn)
+ push(fcn, part[nf]);
+ printf " &%s,\n", mangle(part[nf-SPECIFIC]);
+ }
+ push("", 0);
+
+ # Output the builtin function structure.
+ print "";
+ if (MCC) {
+ print "%}";
+ print "struct overloadx {";
+ print " char *name;";
+ print " int fcns;";
+ print " int args;";
+ print " struct builtin **functions;";
+ print "};";
+ print "%%";
+ } else if (GCC) {
+ print "const struct overloadx Overload[] = {";
+ }
+
+ # Output the builtin function list and data.
+ uid = 0;
+ for (i = 0; i < n_lines; i++) {
+ nf = split(line[i], part);
+ fcn = part[1];
+ args = nf - NARGS;
+ if (part[nf-OPN] == "...") args = -args;
+ if (last != fcn) {
+ if (MCC) printf "%s, %d, %d, O_%s\n", fcn, part[nf], args, fcn;
+ if (GCC) printf " { \"%s\", %d, %d, O_%s, O_UID(%d) },\n", \
+ fcn, part[nf], args, fcn, uid++;
+ }
+ last = fcn;
+ }
+
+ if (GCC) {
+ print " { NULL, 0, 0, NULL, 0 }"
+ print "};";
+
+ printf "#define LAST_O_UID O_UID(%d)\n", uid;
+ }
+
+ }' > $output.$suffix
+
+if [ "$MCC" = "1" ] ; then
+ $gperf > $output.h
+fi
diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c
index 5d36d5d28b3..a231e1756b1 100644
--- a/gcc/config/rs6000/rs6000-c.c
+++ b/gcc/config/rs6000/rs6000-c.c
@@ -30,6 +30,17 @@
#include "c-pragma.h"
#include "errors.h"
#include "tm_p.h"
+/* APPLE LOCAL begin AltiVec */
+#include "c-common.h"
+#include "cpplib.h"
+#include "../libcpp/internal.h"
+#include "target.h"
+#include "options.h"
+
+static cpp_hashnode *altivec_categorize_keyword (const cpp_token *);
+static void init_vector_keywords (cpp_reader *pfile);
+/* APPLE LOCAL end AltiVec */
+
/* Handle the machine specific pragma longcall. Its syntax is
@@ -78,6 +89,138 @@ rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
#define builtin_define(TXT) cpp_define (pfile, TXT)
#define builtin_assert(TXT) cpp_assert (pfile, TXT)
+/* APPLE LOCAL begin AltiVec */
+/* Keep the AltiVec keywords handy for fast comparisons. */
+static GTY(()) cpp_hashnode *__vector_keyword;
+static GTY(()) cpp_hashnode *vector_keyword;
+static GTY(()) cpp_hashnode *__pixel_keyword;
+static GTY(()) cpp_hashnode *pixel_keyword;
+static GTY(()) cpp_hashnode *__bool_keyword;
+static GTY(()) cpp_hashnode *bool_keyword;
+static GTY(()) cpp_hashnode *_Bool_keyword;
+
+static GTY(()) cpp_hashnode *expand_bool_pixel; /* Preserved across calls. */
+
+static cpp_hashnode *
+altivec_categorize_keyword (const cpp_token *tok)
+{
+ if (tok->type == CPP_NAME)
+ {
+ cpp_hashnode *ident = tok->val.node;
+
+ if (ident == vector_keyword || ident == __vector_keyword)
+ return __vector_keyword;
+
+ if (ident == pixel_keyword || ident == __pixel_keyword)
+ return __pixel_keyword;
+
+ if (ident == bool_keyword || ident == _Bool_keyword
+ || ident == __bool_keyword)
+ return __bool_keyword;
+
+ return ident;
+ }
+
+ return 0;
+}
+
+/* Called to decide whether a conditional macro should be expanded.
+ Since we have exactly one such macro (i.e., 'vector'), we do not
+ need to examine the 'tok' parameter. */
+
+cpp_hashnode *
+rs6000_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
+{
+ static bool vector_keywords_init = false;
+ cpp_hashnode *expand_this = tok->val.node;
+ cpp_hashnode *ident;
+
+ if (!vector_keywords_init)
+ {
+ init_vector_keywords (pfile);
+ vector_keywords_init = true;
+ }
+
+ ident = altivec_categorize_keyword (tok);
+
+ if (ident == __vector_keyword)
+ {
+ tok = _cpp_peek_token (pfile, 0);
+ ident = altivec_categorize_keyword (tok);
+
+ if (ident == __pixel_keyword || ident == __bool_keyword)
+ {
+ expand_this = __vector_keyword;
+ expand_bool_pixel = ident;
+ }
+ else if (ident)
+ {
+ enum rid rid_code = (enum rid)(ident->rid_code);
+ if (ident->type == NT_MACRO)
+ {
+ (void)cpp_get_token (pfile);
+ tok = _cpp_peek_token (pfile, 0);
+ ident = altivec_categorize_keyword (tok);
+ rid_code = (enum rid)(ident->rid_code);
+ }
+
+ if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
+ || rid_code == RID_SHORT || rid_code == RID_SIGNED
+ || rid_code == RID_INT || rid_code == RID_CHAR
+ || rid_code == RID_FLOAT)
+ {
+ expand_this = __vector_keyword;
+ /* If the next keyword is bool or pixel, it
+ will need to be expanded as well. */
+ tok = _cpp_peek_token (pfile, 1);
+ ident = altivec_categorize_keyword (tok);
+
+ if (ident == __pixel_keyword || ident == __bool_keyword)
+ expand_bool_pixel = ident;
+ }
+ }
+ }
+ else if (expand_bool_pixel
+ && (ident == __pixel_keyword || ident == __bool_keyword))
+ {
+ expand_this = expand_bool_pixel;
+ expand_bool_pixel = 0;
+ }
+
+ return expand_this;
+}
+
+static void
+init_vector_keywords (cpp_reader *pfile)
+{
+ /* Keywords without two leading underscores are context-sensitive, and hence
+ implemented as conditional macros, controlled by the rs6000_macro_to_expand()
+ function above. */
+ __vector_keyword = cpp_lookup (pfile, DSC ("__vector"));
+ __vector_keyword->flags |= NODE_CONDITIONAL;
+
+ __pixel_keyword = cpp_lookup (pfile, DSC ("__pixel"));
+ __pixel_keyword->flags |= NODE_CONDITIONAL;
+
+ __bool_keyword = cpp_lookup (pfile, DSC ("__bool"));
+ __bool_keyword->flags |= NODE_CONDITIONAL;
+
+ vector_keyword = cpp_lookup (pfile, DSC ("vector"));
+ vector_keyword->flags |= NODE_CONDITIONAL;
+
+ pixel_keyword = cpp_lookup (pfile, DSC ("pixel"));
+ pixel_keyword->flags |= NODE_CONDITIONAL;
+
+ _Bool_keyword = cpp_lookup (pfile, DSC ("_Bool"));
+ _Bool_keyword->flags |= NODE_CONDITIONAL;
+
+ bool_keyword = cpp_lookup (pfile, DSC ("bool"));
+ bool_keyword->flags |= NODE_CONDITIONAL;
+ return;
+}
+
+/* APPLE LOCAL end AltiVec */
+
void
rs6000_cpu_cpp_builtins (cpp_reader *pfile)
{
@@ -100,6 +243,25 @@ rs6000_cpu_cpp_builtins (cpp_reader *pfile)
builtin_define ("__vector=__attribute__((altivec(vector__)))");
builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");
+
+ /* APPLE LOCAL begin AltiVec */
+ builtin_define ("vector=vector");
+ builtin_define ("pixel=pixel");
+ builtin_define ("_Bool=_Bool");
+ builtin_define ("bool=bool");
+ init_vector_keywords (pfile);
+
+ /* Indicate that the compiler supports Apple AltiVec syntax,
+ including context-sensitive keywords. */
+ if (rs6000_altivec_pim)
+ {
+ builtin_define ("__APPLE_ALTIVEC__");
+ builtin_define ("vec_step(T)=(sizeof (__typeof__(T)) / sizeof (__typeof__(T) __attribute__((altivec(element__)))))");
+ }
+
+ /* Enable context-sensitive macros. */
+ cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
+ /* APPLE LOCAL end AltiVec */
}
if (TARGET_SPE)
builtin_define ("__SPE__");
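A hedged illustration of what the conditional-macro machinery above provides in Apple AltiVec ('-faltivec') mode; this snippet is not part of the patch:

    vector signed int v;     /* 'vector' expands only in this position.   */
    vector pixel px;         /* 'pixel' expands when it follows 'vector'. */
    int vector = 42;         /* Ordinary identifier: no expansion.        */
    int n = vec_step (v);    /* 4, via the vec_step macro defined above.  */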
diff --git a/gcc/config/rs6000/rs6000-protos.h b/gcc/config/rs6000/rs6000-protos.h
index 4a4fcde8e2d..283dbd68526 100644
--- a/gcc/config/rs6000/rs6000-protos.h
+++ b/gcc/config/rs6000/rs6000-protos.h
@@ -121,7 +121,7 @@ extern enum rtx_code rs6000_reverse_condition (enum machine_mode,
extern void rs6000_emit_sCOND (enum rtx_code, rtx);
extern void rs6000_emit_cbranch (enum rtx_code, rtx);
extern char * output_cbranch (rtx, const char *, int, rtx);
-extern char * output_e500_flip_eq_bit (rtx, rtx);
+extern char * output_e500_flip_gt_bit (rtx, rtx);
extern rtx rs6000_emit_set_const (rtx, enum machine_mode, rtx, int);
extern int rs6000_emit_cmove (rtx, rtx, rtx, rtx);
extern int rs6000_emit_vector_cond_expr (rtx, rtx, rtx, rtx, rtx, rtx);
@@ -135,6 +135,8 @@ extern int mfcr_operation (rtx, enum machine_mode);
extern int mtcrf_operation (rtx, enum machine_mode);
extern int lmw_operation (rtx, enum machine_mode);
extern struct rtx_def *create_TOC_reference (rtx);
+/* APPLE LOCAL RTX_COST for multiply */
+extern int rs6000_rtx_mult_cost (rtx);
extern void rs6000_split_multireg_move (rtx, rtx);
extern void rs6000_emit_move (rtx, rtx, enum machine_mode);
extern rtx rs6000_legitimize_address (rtx, rtx, enum machine_mode);
@@ -206,12 +208,21 @@ extern int rs6000_tls_symbol_ref (rtx, enum machine_mode);
extern void rs6000_output_dwarf_dtprel (FILE*, int, rtx);
extern int rs6000_hard_regno_nregs (int, enum machine_mode);
extern void rs6000_conditional_register_usage (void);
+/* APPLE LOCAL AltiVec */
+extern tree rs6000_fold_builtin (tree, bool);
+/* APPLE LOCAL CW asm blocks */
+extern const char *rs6000_cw_asm_register_name (const char *, char *);
/* Declare functions in rs6000-c.c */
extern void rs6000_pragma_longcall (struct cpp_reader *);
extern void rs6000_cpu_cpp_builtins (struct cpp_reader *);
+/* APPLE LOCAL begin AltiVec */
+extern struct cpp_hashnode *rs6000_macro_to_expand (struct cpp_reader *,
+ const struct cpp_token *);
+/* APPLE LOCAL end AltiVec */
+
#if TARGET_MACHO
char *output_call (rtx, rtx *, int, int);
#endif
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 267a16131d1..d346e4ec35a 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -50,9 +50,13 @@
#include "target-def.h"
#include "langhooks.h"
#include "reload.h"
+/* APPLE LOCAL why is this needed? */
+#include "insn-addr.h"
#include "cfglayout.h"
#include "sched-int.h"
#include "tree-gimple.h"
+/* APPLE LOCAL mainline 2005-04-14 */
+#include "intl.h"
#if TARGET_XCOFF
#include "xcoffout.h" /* get declarations of xcoff_*_section_name */
#endif
@@ -60,6 +64,17 @@
#include "gstab.h" /* for N_SLINE */
#endif
+/* APPLE LOCAL begin pascal strings */
+#include "../../libcpp/internal.h"
+extern struct cpp_reader* parse_in;
+/* APPLE LOCAL end pascal strings */
+
+/* APPLE LOCAL begin Macintosh alignment */
+#ifndef TARGET_ALIGN_MAC68K
+#define TARGET_ALIGN_MAC68K 0
+#endif
+/* APPLE LOCAL end Macintosh alignment */
+
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif
@@ -257,6 +272,10 @@ static GTY(()) tree pixel_V8HI_type_node; /* __vector __pixel */
int rs6000_warn_altivec_long = 1; /* On by default. */
const char *rs6000_warn_altivec_long_switch;
+/* APPLE LOCAL begin AltiVec */
+int rs6000_altivec_pim = 0;
+const char *rs6000_altivec_pim_switch;
+/* APPLE LOCAL end AltiVec */
const char *rs6000_traceback_name;
static enum {
@@ -586,6 +605,12 @@ struct processor_costs ppc8540_cost = {
COSTS_N_INSNS (29), /* ddiv */
};
+/* APPLE LOCAL begin AltiVec */
+/* NB: We do not store the PIM operations/predicates in the
+ VECTOR_BUILTIN_FNS array. */
+static GTY(()) tree vector_builtin_fns[ALTIVEC_PIM__FIRST];
+/* APPLE LOCAL end AltiVec */
+
/* Instruction costs on POWER4 and POWER5 processors. */
static const
struct processor_costs power4_cost = {
@@ -671,6 +696,8 @@ static void rs6000_xcoff_file_end (void);
#endif
#if TARGET_MACHO
static bool rs6000_binds_local_p (tree);
+/* APPLE LOCAL pragma reverse_bitfield */
+static bool rs6000_reverse_bitfields_p (tree);
#endif
static int rs6000_variable_issue (FILE *, int, rtx, int);
static bool rs6000_rtx_costs (rtx, int, int, int *);
@@ -724,6 +751,16 @@ static rtx altivec_expand_predicate_builtin (enum insn_code,
const char *, tree, rtx);
static rtx altivec_expand_lv_builtin (enum insn_code, tree, rtx);
static rtx altivec_expand_stv_builtin (enum insn_code, tree);
+/* APPLE LOCAL begin AltiVec */
+static tree altivec_cov_rt_12 (tree, tree);
+static tree altivec_cov_rt_2p (tree);
+static tree altivec_cov_rt_1d (tree);
+static tree altivec_cov_rt_1h (tree);
+static struct altivec_pim_info *altivec_ovl_resolve (struct altivec_pim_info *,
+ tree, tree);
+static tree altivec_convert_args (tree, tree);
+/* APPLE LOCAL end AltiVec */
+
static void rs6000_parse_abi_options (void);
static void rs6000_parse_alignment_option (void);
static void rs6000_parse_tls_size_option (void);
@@ -748,17 +785,31 @@ static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_complex_function_value (enum machine_mode);
static rtx rs6000_spe_function_arg (CUMULATIVE_ARGS *,
enum machine_mode, tree);
-static rtx rs6000_darwin64_function_arg (CUMULATIVE_ARGS *,
- enum machine_mode, tree, int);
+static void rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *,
+ HOST_WIDE_INT);
+static void rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *,
+ tree, HOST_WIDE_INT);
+static void rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *,
+ HOST_WIDE_INT,
+ rtx[], int *);
+static void rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *,
+ tree, HOST_WIDE_INT,
+ rtx[], int *);
+static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, tree, int, bool);
static rtx rs6000_mixed_function_arg (enum machine_mode, tree, int);
static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
static void setup_incoming_varargs (CUMULATIVE_ARGS *,
enum machine_mode, tree,
int *, int);
+/* APPLE LOCAL begin Altivec */
+static bool skip_vec_args (tree, int, int*);
+/* APPLE LOCAL end Altivec */
static bool rs6000_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
tree, bool);
static int rs6000_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
tree, bool);
+/* APPLE LOCAL mainline 2005-04-14 */
+static const char *invalid_arg_for_unprototyped_fn (tree, tree, tree);
#if TARGET_MACHO
static void macho_branch_islands (void);
static void add_compiler_branch_island (tree, tree, int);
@@ -936,6 +987,17 @@ static const char alt_reg_names[][8] =
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
+
+/* APPLE LOCAL begin AltiVec */
+/* If we are running in Apple AltiVec (as opposed to FSF AltiVec) mode,
+ we will need to handle the Motorola PIM instructions ourselves instead
+ of relying on <altivec.h>. The rs6000_fold_builtin() routine will
+ rewrite the PIM instructions into the __builtin... (AldyVec)
+ instructions. */
+#undef TARGET_FOLD_BUILTIN
+#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
+/* APPLE LOCAL end AltiVec */
+
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
@@ -948,6 +1010,10 @@ static const char alt_reg_names[][8] =
#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P rs6000_binds_local_p
+/* APPLE LOCAL begin pragma reverse_bitfields */
+#undef TARGET_REVERSE_BITFIELDS_P
+#define TARGET_REVERSE_BITFIELDS_P rs6000_reverse_bitfields_p
+/* APPLE LOCAL end pragma reverse_bitfields */
#endif
#undef TARGET_ASM_OUTPUT_MI_THUNK
@@ -983,6 +1049,11 @@ static const char alt_reg_names[][8] =
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
+/* APPLE LOCAL begin Altivec */
+#undef TARGET_SKIP_VEC_ARGS
+#define TARGET_SKIP_VEC_ARGS skip_vec_args
+/* APPLE LOCAL end Altivec */
+
/* Always strict argument naming on rs6000. */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
@@ -1008,6 +1079,11 @@ static const char alt_reg_names[][8] =
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
+/* APPLE LOCAL begin mainline 2005-04-14 */
+
+#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
+#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
+/* APPLE LOCAL end mainline 2005-04-14 */
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
The PowerPC architecture requires only weak consistency among
@@ -1090,6 +1166,8 @@ rs6000_override_options (const char *default_cpu)
size_t i, j;
struct rs6000_cpu_select *ptr;
int set_masks;
+ /* APPLE LOCAL -fast */
+ enum processor_type mcpu_cpu = PROCESSOR_POWER4;
/* Simplifications for entries below. */
@@ -1173,6 +1251,13 @@ rs6000_override_options (const char *default_cpu)
const size_t ptt_size = ARRAY_SIZE (processor_target_table);
+ /* APPLE LOCAL begin -mmultiple/-mstring fixme */
+ /* Save current -mmultiple/-mno-multiple status. */
+ int multiple = TARGET_MULTIPLE;
+ /* Save current -mstring/-mno-string status. */
+ int string = TARGET_STRING;
+ /* APPLE LOCAL end -mmultiple/-mstring fixme */
+
/* Some OSs don't support saving the high part of 64-bit registers on
context switch. Other OSs don't support saving Altivec registers.
On those OSs, we don't touch the MASK_POWERPC64 or MASK_ALTIVEC
@@ -1198,14 +1283,25 @@ rs6000_override_options (const char *default_cpu)
set_masks &= ~MASK_ALTIVEC;
#endif
- /* Don't override these by the processor default if given explicitly. */
- set_masks &= ~(target_flags_explicit
- & (MASK_MULTIPLE | MASK_STRING | MASK_SOFT_FLOAT));
+ /* Don't override by the processor default if given explicitly. */
+ set_masks &= ~target_flags_explicit;
/* Identify the processor type. */
rs6000_select[0].string = default_cpu;
rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
+ /* APPLE LOCAL begin -fast */
+ if (flag_fast || flag_fastf || flag_fastcp)
+ {
+ if (rs6000_select[1].string == (char *)0 && rs6000_select[2].string == (char *)0)
+ {
+	  /* -mcpu and -mtune unspecified. Assume both are G5. */
+ set_target_switch ("tune=G5");
+ set_target_switch ("cpu=G5");
+ }
+ }
+ /* APPLE LOCAL end -fast */
+
for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
{
ptr = &rs6000_select[i];
@@ -1222,6 +1318,9 @@ rs6000_override_options (const char *default_cpu)
target_flags &= ~set_masks;
target_flags |= (processor_target_table[j].target_enable
& set_masks);
+ /* APPLE LOCAL begin -fast */
+ mcpu_cpu = processor_target_table[j].processor;
+ /* APPLE LOCAL end -fast */
}
break;
}
@@ -1231,13 +1330,115 @@ rs6000_override_options (const char *default_cpu)
}
}
+ /* APPLE LOCAL begin AltiVec */
+ /* If '-maltivec' has been specified or if anything else turns on
+ AltiVec, enable AltiVec optimizations, even if previously turned
+ off via '-faltivec'. */
+ if (TARGET_ALTIVEC)
+ flag_disable_opts_for_faltivec = 0;
+
+ /* Handle -m(no-)pim-altivec. */
+ if (rs6000_altivec_pim_switch)
+ {
+      const char *base = rs6000_altivec_pim_switch - strlen ("pim-altivec");
+ while (base[-1] != 'm') base--;
+ if (*rs6000_altivec_pim_switch != '\0')
+ error ("invalid option `%s'", base);
+
+ rs6000_altivec_pim = (base[0] != 'n');
+ /* If '-faltivec' or '-mpim-altivec' has been specified and we
+ have not already selected AltiVec codegen, disable certain
+ unsafe AltiVec optimizations so that the resulting binary can
+ run on a G3. These may be re-enabled by subsequently
+ specifying '-maltivec' or '-mcpu=xxx', where xxx supports
+ AltiVec instructions. */
+ if (rs6000_altivec_pim)
+ {
+ if (! TARGET_ALTIVEC)
+ {
+ flag_disable_opts_for_faltivec = 1;
+ target_flags |= MASK_ALTIVEC;
+ }
+ }
+ else
+ target_flags &= ~MASK_ALTIVEC;
+ }
+ /* APPLE LOCAL end AltiVec */
+
+ /* APPLE LOCAL begin -fast */
+ if (flag_fast || flag_fastf || flag_fastcp)
+ {
+ flag_gcse_sm = 1;
+ rs6000_sched_insert_nops = sched_finish_regroup_exact;
+ flag_unroll_loops = 1;
+ flag_tree_loop_linear = 1;
+ flag_strict_aliasing = 1;
+ flag_schedule_interblock = 1;
+ align_jumps_max_skip = 15;
+ align_loops_max_skip = 15;
+ align_functions = 16;
+ align_loops = 16;
+ align_jumps = 16;
+ set_fast_math_flags (1);
+ flag_reorder_blocks = 1;
+ /* APPLE LOCAL disable this until it works better. */
+ flag_speculative_prefetching = 0;
+ if (flag_branch_probabilities && !flag_exceptions)
+ flag_reorder_blocks_and_partition = 1;
+ if (!flag_pic)
+ set_target_switch ("dynamic-no-pic");
+
+ if (mcpu_cpu == PROCESSOR_POWER4)
+ {
+ set_target_switch ("powerpc-gpopt");
+ set_target_switch ("powerpc64");
+ }
+ if (flag_fast || flag_fastcp)
+	/* This doesn't work with NAG Fortran output. The gcc 3.5 C++ libraries
+	   have been adjusted so that natural alignment now works with them. */
+ set_target_switch ("align-natural");
+ if (flag_fastf)
+ /* This applies Fortran argument semantics; for NAG Fortran output only. */
+ flag_argument_noalias = 2;
+ /* IMI flags */
+ disable_typechecking_for_spec_flag = 1;
+ flag_unit_at_a_time = 1;
+ }
+ /* APPLE LOCAL end -fast */
+
+ /* APPLE LOCAL rs6000_init_hard_regno_mode_ok must come AFTER setting of -fast flags */
+ rs6000_init_hard_regno_mode_ok ();
+
if (TARGET_E500)
rs6000_isel = 1;
+ /* APPLE LOCAL begin Disable string insns with -Os on Darwin (radar 3509006) */
/* If we are optimizing big endian systems for space, use the load/store
- multiple and string instructions. */
+ multiple instructions. */
if (BYTES_BIG_ENDIAN && optimize_size)
- target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);
+ target_flags |= ~target_flags_explicit & MASK_MULTIPLE;
+
+ /* If we are optimizing big endian systems for space, use the
+ string instructions. But do not do this for Darwin, as the
+ kernel can't properly support some hardware that doesn't have
+ these instructions. It's not clear that the compiler is the
+ right place to fix this, but that's how it is for now. See
+ *extensive* discussion in Radar 3509006. */
+ if (BYTES_BIG_ENDIAN && optimize_size && DEFAULT_ABI != ABI_DARWIN)
+ target_flags |= MASK_STRING;
+ /* APPLE LOCAL end Disable string insns with -Os on Darwin (radar 3509006) */
+
+ /* APPLE LOCAL begin -mmultiple/-mstring fixme */
+ /* If -mmultiple or -mno-multiple was explicitly used, don't
+     override with the processor default. */
+ if ((target_flags_explicit & MASK_MULTIPLE) != 0)
+ target_flags = (target_flags & ~MASK_MULTIPLE) | multiple;
+
+ /* If -mstring or -mno-string was explicitly used, don't override
+ with the processor default. */
+ if ((target_flags_explicit & MASK_STRING) != 0)
+ target_flags = (target_flags & ~MASK_STRING) | string;
+ /* APPLE LOCAL end -mmultiple/-mstring fixme */
/* Don't allow -mmultiple or -mstring on little endian systems
unless the cpu is a 750, because the hardware doesn't support the
@@ -1315,7 +1516,11 @@ rs6000_override_options (const char *default_cpu)
/* Setting to empty string is same as "-mone-byte-bool". */
#if TARGET_MACHO
darwin_one_byte_bool = "";
+ /* APPLE LOCAL pragma reverse_bitfields */
+ darwin_reverse_bitfields = 0;
#endif
+ /* Default to natural alignment, for better performance. */
+ rs6000_alignment_flags = MASK_ALIGN_NATURAL;
}
/* Handle -mabi= options. */
@@ -1417,9 +1622,11 @@ rs6000_override_options (const char *default_cpu)
rs6000_sched_restricted_insns_priority =
atoi (rs6000_sched_restricted_insns_priority_str);
+ /* APPLE LOCAL begin only consider true dependency for grouping */
/* Handle -msched-costly-dep option. */
rs6000_sched_costly_dep
- = (rs6000_sched_groups ? store_to_load_dep_costly : no_dep_costly);
+ = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
+ /* APPLE LOCAL end only consider true dependency for grouping */
if (rs6000_sched_costly_dep_str)
{
if (! strcmp (rs6000_sched_costly_dep_str, "no"))
@@ -1511,6 +1718,12 @@ rs6000_override_options (const char *default_cpu)
if (DEFAULT_ABI != ABI_AIX)
targetm.calls.split_complex_arg = NULL;
+ /* APPLE LOCAL begin AltiVec */
+ /* Enable '(vector signed int)(a, b, c, d)' vector literal notation. */
+ if (TARGET_ALTIVEC)
+ targetm.cast_expr_as_vector_init = true;
+ /* APPLE LOCAL end AltiVec */
+
/* Initialize rs6000_cost with the appropriate target costs. */
if (optimize_size)
rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
@@ -1683,8 +1896,27 @@ rs6000_parse_alignment_option (void)
{
if (rs6000_alignment_string == 0)
return;
+ /* APPLE LOCAL begin Macintosh alignment 2002-2-26 --ff */
+ else if (! strcmp (rs6000_alignment_string, "mac68k"))
+ {
+ /* The old mac68k alignment has zero value for 64-bit work,
+ forbid its use. */
+ if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
+ error ("-malign-mac68k is not allowed for 64-bit Darwin");
+ rs6000_alignment_flags = MASK_ALIGN_MAC68K;
+ }
+ /* APPLE LOCAL end Macintosh alignment 2002-2-26 --ff */
else if (! strcmp (rs6000_alignment_string, "power"))
- rs6000_alignment_flags = MASK_ALIGN_POWER;
+ {
+ /* On 64-bit Darwin, power alignment is ABI-incompatible with
+ some C library functions, so warn about it. The flag may be
+ useful for performance studies from time to time though, so
+ don't disable it entirely. */
+ if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
+ warning ("-malign-power is not supported for 64-bit Darwin;"
+ " it is incompatible with the installed C and C++ libraries");
+ rs6000_alignment_flags = MASK_ALIGN_POWER;
+ }
else if (! strcmp (rs6000_alignment_string, "natural"))
rs6000_alignment_flags = MASK_ALIGN_NATURAL;
else
@@ -1712,7 +1944,61 @@ rs6000_parse_tls_size_option (void)
void
optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
{
-}
+ /* APPLE LOCAL begin tweak default optimizations */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ /* Turn these on only if specifically requested, not with -O* */
+ /* Strict aliasing breaks too much existing code */
+ flag_strict_aliasing = 0;
+ /* Block reordering causes code bloat, and very little speedup */
+ flag_reorder_blocks = 0;
+ /* Multi-basic-block scheduling loses badly when the compiler
+ misguesses which blocks are going to be executed, more than
+ it gains when it guesses correctly. Its guesses for cases
+ where interblock scheduling occurs (if-then-else's) are
+ little better than random, so disable this unless requested. */
+ flag_schedule_interblock = 0;
+ /* The Darwin libraries never set errno, so we might as well
+ avoid calling them when that's the only reason we would. */
+ flag_errno_math = 0;
+ /* Trapping math is not needed by many users, and is expensive.
+ C99 permits us to default it off and we do that. It is
+ turned on when <fenv.h> is included (see darwin_pragma_fenv
+ in darwin-c.c). */
+ flag_trapping_math = 0;
+ }
+ /* APPLE LOCAL end tweak default optimizations */
+}
+
+/* APPLE LOCAL begin optimization pragmas 3124235/3420242 */
+/* Version of the above for use from #pragma optimization_level.
+ Do not reset things unless they're per-function. */
+
+void
+reset_optimization_options (int level ATTRIBUTE_UNUSED,
+ int size ATTRIBUTE_UNUSED)
+{
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ /* Block reordering causes code bloat, and very little speedup */
+ flag_reorder_blocks = 0;
+ /* Multi-basic-block scheduling loses badly when the compiler
+ misguesses which blocks are going to be executed, more than
+ it gains when it guesses correctly. Its guesses for cases
+ where interblock scheduling occurs (if-then-else's) are
+ little better than random, so disable this unless requested. */
+ flag_schedule_interblock = 0;
+ /* The Darwin libraries never set errno, so we might as well
+ avoid calling them when that's the only reason we would. */
+ flag_errno_math = 0;
+ /* Trapping math is not needed by many users, and is expensive.
+ C99 permits us to default it off and we do that. It is
+ turned on when <fenv.h> is included (see darwin_pragma_fenv
+ in darwin-c.c). */
+ flag_trapping_math = 0;
+ }
+}
+/* APPLE LOCAL end optimization pragmas 3124235/3420242 */
/* Do anything needed at the start of the asm file. */
@@ -1774,6 +2060,38 @@ rs6000_file_start (void)
toc_section ();
text_section ();
}
+
+ /* APPLE LOCAL begin lno */
+#if TARGET_MACHO
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ /* Emit declarations for all code sections at the beginning of the file; this
+ keeps them from being separated by data sections, which can lead to
+ out-of-range branches. Also align the unlikely text section properly; the
+ first real occurrence of this may be a label within a function, which does
+ not otherwise get aligned. */
+ if (flag_pic || MACHO_DYNAMIC_NO_PIC_P || flag_reorder_blocks_and_partition)
+ {
+ fprintf (asm_out_file, "\t.section __TEXT,__text,regular,pure_instructions\n");
+ if (flag_reorder_blocks_and_partition)
+ {
+ fprintf (asm_out_file, "\t.section __TEXT,__unlikely,regular,pure_instructions\n");
+ fprintf (asm_out_file, "\t.align 2\n");
+ }
+ if (MACHO_DYNAMIC_NO_PIC_P )
+ {
+ fprintf (asm_out_file, "\t.section __TEXT,__symbol_stub1,");
+ fprintf (asm_out_file, "symbol_stubs,pure_instructions,16\n");
+ }
+ else
+ {
+ fprintf (asm_out_file, "\t.section __TEXT,__picsymbolstub1,");
+ fprintf (asm_out_file, "symbol_stubs,pure_instructions,32\n");
+ }
+ }
+ }
+#endif
+ /* APPLE LOCAL end lno */
}
@@ -2966,6 +3284,11 @@ call_operand (rtx op, enum machine_mode mode)
return 0;
return (GET_CODE (op) == SYMBOL_REF
+ /* APPLE LOCAL begin accept hard R12 as target reg */
+#ifdef MAGIC_INDIRECT_CALL_REG
+ || (GET_CODE (op) == REG && REGNO (op) == MAGIC_INDIRECT_CALL_REG)
+#endif
+ /* APPLE LOCAL end accept hard R12 as target reg */
|| (GET_CODE (op) == REG
&& (REGNO (op) == LINK_REGISTER_REGNUM
|| REGNO (op) == COUNT_REGISTER_REGNUM
@@ -3652,21 +3975,16 @@ rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
rs6000_emit_move (got, gsym, Pmode);
else
{
- char buf[30];
- static int tls_got_labelno = 0;
- rtx tempLR, lab, tmp3, mem;
+ rtx tempLR, tmp3, mem;
rtx first, last;
- ASM_GENERATE_INTERNAL_LABEL (buf, "LTLS", tls_got_labelno++);
- lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
tempLR = gen_reg_rtx (Pmode);
tmp1 = gen_reg_rtx (Pmode);
tmp2 = gen_reg_rtx (Pmode);
tmp3 = gen_reg_rtx (Pmode);
mem = gen_const_mem (Pmode, tmp1);
- first = emit_insn (gen_load_toc_v4_PIC_1b (tempLR, lab,
- gsym));
+ first = emit_insn (gen_load_toc_v4_PIC_1b (tempLR, gsym));
emit_move_insn (tmp1, tempLR);
emit_move_insn (tmp2, mem);
emit_insn (gen_addsi3 (tmp3, tmp1, tmp2));
@@ -3870,6 +4188,7 @@ rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
&& REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
&& GET_CODE (XEXP (x, 1)) == CONST_INT
&& (INTVAL (XEXP (x, 1)) & 3) != 0
+ && !ALTIVEC_VECTOR_MODE (mode)
&& GET_MODE_SIZE (mode) >= UNITS_PER_WORD
&& TARGET_POWERPC64)
{
@@ -4318,6 +4637,57 @@ rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
return dest;
}
+/* APPLE LOCAL begin RTX_COST for multiply */
+int
+rs6000_rtx_mult_cost (rtx x)
+{
+ switch (rs6000_cpu)
+ {
+ case PROCESSOR_RIOS1:
+ case PROCESSOR_PPC405:
+ return (GET_CODE (XEXP (x, 1)) != CONST_INT
+ ? COSTS_N_INSNS (5)
+ : INTVAL (XEXP (x, 1)) >= -256 && INTVAL (XEXP (x, 1)) <= 255
+ ? COSTS_N_INSNS (3) : COSTS_N_INSNS (4));
+ case PROCESSOR_RS64A:
+ return (GET_CODE (XEXP (x, 1)) != CONST_INT
+ ? GET_MODE (XEXP (x, 1)) != DImode
+ ? COSTS_N_INSNS (20) : COSTS_N_INSNS (34)
+ : INTVAL (XEXP (x, 1)) >= -256 && INTVAL (XEXP (x, 1)) <= 255
+ ? COSTS_N_INSNS (8) : COSTS_N_INSNS (12));
+ case PROCESSOR_RIOS2:
+ case PROCESSOR_MPCCORE:
+ case PROCESSOR_PPC604e:
+ return COSTS_N_INSNS (2);
+ case PROCESSOR_PPC601:
+ return COSTS_N_INSNS (5);
+ case PROCESSOR_PPC603:
+ case PROCESSOR_PPC7400:
+ case PROCESSOR_PPC750:
+ return (GET_CODE (XEXP (x, 1)) != CONST_INT
+ ? COSTS_N_INSNS (5)
+ : INTVAL (XEXP (x, 1)) >= -256 && INTVAL (XEXP (x, 1)) <= 255
+ ? COSTS_N_INSNS (2) : COSTS_N_INSNS (3));
+ case PROCESSOR_PPC7450:
+ return (GET_CODE (XEXP (x, 1)) != CONST_INT
+ ? COSTS_N_INSNS (4)
+ : COSTS_N_INSNS (3));
+ case PROCESSOR_PPC403:
+ case PROCESSOR_PPC604:
+ return COSTS_N_INSNS (4);
+ case PROCESSOR_PPC620:
+ case PROCESSOR_PPC630:
+ return (GET_CODE (XEXP (x, 1)) != CONST_INT
+ ? GET_MODE (XEXP (x, 1)) != DImode
+ ? COSTS_N_INSNS (5) : COSTS_N_INSNS (7)
+ : INTVAL (XEXP (x, 1)) >= -256 && INTVAL (XEXP (x, 1)) <= 255
+ ? COSTS_N_INSNS (3) : COSTS_N_INSNS (4));
+ default:
+ abort ();
+ }
+}
+/* APPLE LOCAL end RTX_COST for multiply */
+
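A minimal sketch of a plausible call site, assuming the rs6000_rtx_costs hook routes MULT through the new helper (the hook's real body is not shown in this hunk; the function name is hypothetical):

    /* Sketch only: assumed wiring inside a cost hook.  */
    static bool
    sketch_mult_cost (rtx x, int code, int *total)
    {
      if (code == MULT)
        {
          *total = rs6000_rtx_mult_cost (x);  /* per-processor multiply cost */
          return true;
        }
      return false;
    }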
/* Helper for the following. Get rid of [r+r] memory refs
in cases where it won't work (TImode, TFmode). */
@@ -4716,12 +5086,23 @@ rs6000_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
{
/* In the darwin64 abi, try to use registers for larger structs
if possible. */
- if (AGGREGATE_TYPE_P (type)
- && rs6000_darwin64_abi
+ if (rs6000_darwin64_abi
&& TREE_CODE (type) == RECORD_TYPE
- && ((unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 32)
- && ((unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 0))
- return false;
+ && int_size_in_bytes (type) > 0)
+ {
+ CUMULATIVE_ARGS valcum;
+ rtx valret;
+
+ valcum.words = 0;
+ valcum.fregno = FP_ARG_MIN_REG;
+ valcum.vregno = ALTIVEC_ARG_MIN_REG;
+ /* Do a trial code generation as if this were going to be passed
+ as an argument; if any part goes in memory, we return NULL. */
+ valret = rs6000_darwin64_record_arg (&valcum, type, 1, true);
+ if (valret)
+ return false;
+ /* Otherwise fall through to more conventional ABI rules. */
+ }
if (AGGREGATE_TYPE_P (type)
&& (TARGET_AIX_STRUCT_RET
@@ -4912,6 +5293,9 @@ function_arg_boundary (enum machine_mode mode, tree type)
|| (type && TREE_CODE (type) == VECTOR_TYPE
&& int_size_in_bytes (type) >= 16))
return 128;
+ else if (rs6000_darwin64_abi && mode == BLKmode
+ && type && TYPE_ALIGN (type) > 64)
+ return 128;
else
return PARM_BOUNDARY;
}
@@ -4934,46 +5318,84 @@ rs6000_arg_size (enum machine_mode mode, tree type)
return (size + 7) >> 3;
}
-/* The darwin64 ABI calls for us to recurse down through structs,
- applying the same rules to struct elements as if a reference to
- each were being passed directly. */
+/* Use this to flush pending int fields. */
static void
-darwin64_function_arg_advance (CUMULATIVE_ARGS *cum, tree type,
- int named, int depth)
+rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
+ HOST_WIDE_INT bitpos)
{
- tree f, ftype;
- int i, tot;
+ unsigned int startbit, endbit;
+ int intregs, intoffset;
+ enum machine_mode mode;
- switch (TREE_CODE (type))
- {
- case RECORD_TYPE:
- for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
- if (TREE_CODE (f) == FIELD_DECL)
- {
- ftype = TREE_TYPE (f);
- function_arg_advance (cum, TYPE_MODE (ftype), ftype,
- named, depth + 1);
- }
- break;
+ if (cum->intoffset == -1)
+ return;
- case ARRAY_TYPE:
- tot = int_size_in_bytes (type);
- if (tot <= 0)
- return;
- ftype = TREE_TYPE (type);
- tot /= int_size_in_bytes (ftype);
-
- for (i = 0; i < tot; ++i)
+ intoffset = cum->intoffset;
+ cum->intoffset = -1;
+
+ if (intoffset % BITS_PER_WORD != 0)
+ {
+ mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
+ MODE_INT, 0);
+ if (mode == BLKmode)
{
- function_arg_advance (cum, TYPE_MODE (ftype), ftype,
- named, depth + 1);
+ /* We couldn't find an appropriate mode, which happens,
+ e.g., in packed structs when there are 3 bytes to load.
+	     Move intoffset back to the beginning of the word in this
+ case. */
+ intoffset = intoffset & -BITS_PER_WORD;
}
- break;
-
- default:
- abort ();
}
+
+ startbit = intoffset & -BITS_PER_WORD;
+ endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
+ intregs = (endbit - startbit) / BITS_PER_WORD;
+ cum->words += intregs;
+}
+
+/* The darwin64 ABI calls for us to recurse down through structs,
+ looking for elements passed in registers. Unfortunately, we have
+ to track int register count here also because of misalignments
+ in powerpc alignment mode. */
+
+static void
+rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
+ tree type,
+ HOST_WIDE_INT startbitpos)
+{
+ tree f;
+
+ for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
+ if (TREE_CODE (f) == FIELD_DECL)
+ {
+ HOST_WIDE_INT bitpos = startbitpos;
+ tree ftype = TREE_TYPE (f);
+ enum machine_mode mode = TYPE_MODE (ftype);
+
+ if (DECL_SIZE (f) != 0
+ && host_integerp (bit_position (f), 1))
+ bitpos += int_bit_position (f);
+
+ /* ??? FIXME: else assume zero offset. */
+
+ if (TREE_CODE (ftype) == RECORD_TYPE)
+ rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
+ else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
+ {
+ rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
+ cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
+ cum->words += (GET_MODE_SIZE (mode) + 7) >> 3;
+ }
+ else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
+ {
+ rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
+ cum->vregno++;
+ cum->words += 2;
+ }
+ else if (cum->intoffset == -1)
+ cum->intoffset = bitpos;
+ }
}
/* Update the data in CUM to advance over an argument
@@ -4988,6 +5410,8 @@ void
function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
tree type, int named, int depth)
{
+ int size;
+
/* Only tick off an argument if we're not recursing. */
if (depth == 0)
cum->nargs_prototype--;
@@ -5051,10 +5475,30 @@ function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
else if (rs6000_darwin64_abi
&& mode == BLKmode
- && (TREE_CODE (type) == RECORD_TYPE
- || TREE_CODE (type) == ARRAY_TYPE))
- darwin64_function_arg_advance (cum, type, named, depth);
-
+ && TREE_CODE (type) == RECORD_TYPE
+ && (size = int_size_in_bytes (type)) > 0)
+ {
+ /* Variable sized types have size == -1 and are
+ treated as if consisting entirely of ints.
+ Pad to 16 byte boundary if needed. */
+ if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
+ && (cum->words % 2) != 0)
+ cum->words++;
+ /* For varargs, we can just go up by the size of the struct. */
+ if (!named)
+ cum->words += (size + 7) / 8;
+ else
+ {
+ /* It is tempting to say int register count just goes up by
+ sizeof(type)/8, but this is wrong in a case such as
+ { int; double; int; } [powerpc alignment]. We have to
+ grovel through the fields for these too. */
+ cum->intoffset = 0;
+ rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
+ rs6000_darwin64_record_arg_advance_flush (cum,
+ size * BITS_PER_UNIT);
+ }
+ }
else if (DEFAULT_ABI == ABI_V4)
{
if (TARGET_HARD_FLOAT && TARGET_FPRS
@@ -5213,136 +5657,184 @@ rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
}
}
-/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
- the register(s) to be used for each field and subfield of a struct
- being passed by value, along with the offset of where the
- register's value may be found in the block. */
+/* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
+ structure between cum->intoffset and bitpos to integer registers. */
-static rtx
-rs6000_darwin64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
- tree type, int named)
+static void
+rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
+ HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
- tree f, ftype, offset;
- rtx rvec[FIRST_PSEUDO_REGISTER], sub, suboff, roffset;
- int k = 0, i, j, bytepos, subbytepos, tot;
- CUMULATIVE_ARGS saved_cum = *cum;
- enum machine_mode submode;
+ enum machine_mode mode;
+ unsigned int regno;
+ unsigned int startbit, endbit;
+ int this_regno, intregs, intoffset;
+ rtx reg;
+
+ if (cum->intoffset == -1)
+ return;
- switch (TREE_CODE (type))
+ intoffset = cum->intoffset;
+ cum->intoffset = -1;
+
+ /* If this is the trailing part of a word, try to only load that
+ much into the register. Otherwise load the whole register. Note
+ that in the latter case we may pick up unwanted bits. It's not a
+     problem at the moment but we may wish to revisit it.  */
+
+ if (intoffset % BITS_PER_WORD != 0)
{
- case RECORD_TYPE:
- for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
- if (TREE_CODE (f) == FIELD_DECL)
+ mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
+ MODE_INT, 0);
+ if (mode == BLKmode)
+ {
+ /* We couldn't find an appropriate mode, which happens,
+ e.g., in packed structs when there are 3 bytes to load.
+	     Move intoffset back to the beginning of the word in this
+ case. */
+ intoffset = intoffset & -BITS_PER_WORD;
+ mode = word_mode;
+ }
+ }
+ else
+ mode = word_mode;
+
+ startbit = intoffset & -BITS_PER_WORD;
+ endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
+ intregs = (endbit - startbit) / BITS_PER_WORD;
+ this_regno = cum->words + intoffset / BITS_PER_WORD;
+
+ if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
+ cum->use_stack = 1;
+
+ intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
+ if (intregs <= 0)
+ return;
+
+ intoffset /= BITS_PER_UNIT;
+ do
+ {
+ regno = GP_ARG_MIN_REG + this_regno;
+ reg = gen_rtx_REG (mode, regno);
+ rvec[(*k)++] =
+ gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
+
+ this_regno += 1;
+ intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
+ mode = word_mode;
+ intregs -= 1;
+ }
+ while (intregs > 0);
+}
+
+/* Recursive workhorse for the following. */
+
+static void
+rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, tree type,
+ HOST_WIDE_INT startbitpos, rtx rvec[],
+ int *k)
+{
+ tree f;
+
+ for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
+ if (TREE_CODE (f) == FIELD_DECL)
+ {
+ HOST_WIDE_INT bitpos = startbitpos;
+ tree ftype = TREE_TYPE (f);
+ enum machine_mode mode = TYPE_MODE (ftype);
+
+ if (DECL_SIZE (f) != 0
+ && host_integerp (bit_position (f), 1))
+ bitpos += int_bit_position (f);
+
+ /* ??? FIXME: else assume zero offset. */
+
+ if (TREE_CODE (ftype) == RECORD_TYPE)
+ rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
+ else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
{
- ftype = TREE_TYPE (f);
- offset = DECL_FIELD_OFFSET (f);
- bytepos = int_bit_position (f) / BITS_PER_UNIT;
- /* Force substructs to be handled as BLKmode even if
- they're small enough to be recorded as DImode, so we
- drill through to non-record fields. */
- submode = TYPE_MODE (ftype);
- if (TREE_CODE (ftype) == RECORD_TYPE)
- submode = BLKmode;
- sub = function_arg (cum, submode, ftype, named);
- if (sub == NULL_RTX)
- return NULL_RTX;
- if (GET_CODE (sub) == PARALLEL)
- {
- for (i = 0; i < XVECLEN (sub, 0); i++)
- {
- rtx subsub = XVECEXP (sub, 0, i);
- suboff = XEXP (subsub, 1);
- subbytepos = INTVAL (suboff);
- subbytepos += bytepos;
- roffset = gen_rtx_CONST_INT (SImode, subbytepos);
- subsub = XEXP (subsub, 0);
- rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, subsub, roffset);
- }
- }
- else
+#if 0
+ switch (mode)
{
- roffset = gen_rtx_CONST_INT (SImode, bytepos);
- rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, sub, roffset);
+ case SCmode: mode = SFmode; break;
+ case DCmode: mode = DFmode; break;
+ case TCmode: mode = TFmode; break;
+ default: break;
}
- /* Now do an arg advance to get all the cumulative arg
- stuff set correctly for the next subfield. Note that it
- has no lasting effect, because it is being done on a
- temporary copy of the cumulative arg data. */
- function_arg_advance (cum, submode, ftype, named, 1);
+#endif
+ rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
+ rvec[(*k)++]
+ = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->fregno++),
+ GEN_INT (bitpos / BITS_PER_UNIT));
+ if (mode == TFmode)
+ cum->fregno++;
}
- break;
-
- case UNION_TYPE:
- tot = rs6000_arg_size (mode, type);
- if (tot <= 0)
- return NULL_RTX;
- bytepos = 0;
-
- for (j = 0; j < tot; ++j)
- {
- sub = gen_rtx_REG ((TARGET_64BIT ? DImode : SImode), GP_ARG_MIN_REG + cum->words++);
- roffset = gen_rtx_CONST_INT (SImode, bytepos);
- rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, sub, roffset);
- if (cum->words >= GP_ARG_NUM_REG)
- break;
- bytepos += (TARGET_64BIT ? 8 : 4);
- }
- break;
+ else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
+ {
+ rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
+ rvec[(*k)++]
+ = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->vregno++),
+ GEN_INT (bitpos / BITS_PER_UNIT));
+ }
+ else if (cum->intoffset == -1)
+ cum->intoffset = bitpos;
+ }
+}
- case ARRAY_TYPE:
- tot = int_size_in_bytes (type);
- if (tot <= 0)
- return NULL_RTX;
- ftype = TREE_TYPE (type);
- tot /= int_size_in_bytes (ftype);
- bytepos = 0;
+/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
+ the register(s) to be used for each field and subfield of a struct
+ being passed by value, along with the offset of where the
+ register's value may be found in the block. FP fields go in FP
+ register, vector fields go in vector registers, and everything
+ else goes in int registers, packed as in memory.
- for (j = 0; j < tot; ++j)
- {
- /* Force substructs to be handled as BLKmode even if
- they're small enough to be recorded as DImode, so we
- drill through to non-record fields. */
- submode = TYPE_MODE (ftype);
- if (TREE_CODE (ftype) == RECORD_TYPE)
- submode = BLKmode;
- sub = function_arg (cum, submode, ftype, named);
- if (sub == NULL_RTX)
- return NULL_RTX;
- if (GET_CODE (sub) == PARALLEL)
- {
- for (i = 0; i < XVECLEN (sub, 0); i++)
- {
- rtx subsub = XVECEXP (sub, 0, i);
-
- suboff = XEXP (subsub, 1);
- subbytepos = INTVAL (suboff);
- subbytepos += bytepos;
- roffset = gen_rtx_CONST_INT (SImode, subbytepos);
- subsub = XEXP (subsub, 0);
- rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, subsub, roffset);
- }
- }
- else
- {
- roffset = gen_rtx_CONST_INT (SImode, bytepos);
- rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, sub, roffset);
- }
- /* Now do an arg advance to get all the cumulative arg
- stuff set correctly for the next subfield. Note that it
- has no lasting effect, because it is being done on a
- temporary copy of the cumulative arg data. */
- function_arg_advance (cum, submode, ftype, named, 1);
- bytepos += int_size_in_bytes (ftype);
- }
- break;
+ This code is also used for function return values. RETVAL indicates
+ whether this is the case.
- default:
- abort ();
- }
+ Much of this is taken from the Sparc V9 port, which has a similar
+ calling convention. */
- *cum = saved_cum;
- if (k > 0)
- return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
+static rtx
+rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, tree type,
+ int named, bool retval)
+{
+ rtx rvec[FIRST_PSEUDO_REGISTER];
+ int k = 1, kbase = 1;
+ HOST_WIDE_INT typesize = int_size_in_bytes (type);
+ /* This is a copy; modifications are not visible to our caller. */
+ CUMULATIVE_ARGS copy_cum = *orig_cum;
+ CUMULATIVE_ARGS *cum = &copy_cum;
+
+ /* Pad to 16 byte boundary if needed. */
+ if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
+ && (cum->words % 2) != 0)
+ cum->words++;
+
+ cum->intoffset = 0;
+ cum->use_stack = 0;
+ cum->named = named;
+
+ /* Put entries into rvec[] for individual FP and vector fields, and
+ for the chunks of memory that go in int regs. Note we start at
+ element 1; 0 is reserved for an indication of using memory, and
+ may or may not be filled in below. */
+ rs6000_darwin64_record_arg_recurse (cum, type, 0, rvec, &k);
+ rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
+
+ /* If any part of the struct went on the stack put all of it there.
+ This hack is because the generic code for
+ FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
+ parts of the struct are not at the beginning. */
+ if (cum->use_stack)
+ {
+ if (retval)
+ return NULL_RTX; /* doesn't go in registers at all */
+ kbase = 0;
+ rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
+ }
+ if (k > 1 || cum->use_stack)
+ return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
else
return NULL_RTX;
}
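For concreteness, a hedged example of the PARALLEL this produces for a small struct in the first argument position; the register numbers are the ones expected under darwin64, not taken from the patch:

    /* struct { double d; int i; } passed by value becomes roughly:
         (parallel [(expr_list (reg:DF f1) (const_int 0))
                    (expr_list (reg:DI r4) (const_int 8))])
       i.e. D goes in an FP register and the word containing I goes in
       a GP register, packed as in memory.  */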
@@ -5406,7 +5898,8 @@ rs6000_mixed_function_arg (enum machine_mode mode, tree type, int align_words)
This is null for libcalls where that information may
not be available.
CUM is a variable of type CUMULATIVE_ARGS which gives info about
- the preceding args and about the function being called.
+ the preceding args and about the function being called. It is
+ not modified in this routine.
NAMED is nonzero if this argument is a named parameter
(otherwise it is an extra parameter matching an ellipsis).
@@ -5454,13 +5947,10 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
return GEN_INT (cum->call_cookie);
}
- if (mode == BLKmode
- && rs6000_darwin64_abi
- && (TREE_CODE (type) == RECORD_TYPE
- || TREE_CODE (type) == UNION_TYPE
- || TREE_CODE (type) == ARRAY_TYPE))
+ if (rs6000_darwin64_abi && mode == BLKmode
+ && TREE_CODE (type) == RECORD_TYPE)
{
- rtx rslt = rs6000_darwin64_function_arg (cum, mode, type, named);
+ rtx rslt = rs6000_darwin64_record_arg (cum, type, named, false);
if (rslt != NULL_RTX)
return rslt;
/* Else fall through to usual handling. */
@@ -5701,6 +6191,12 @@ rs6000_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
&& cum->nargs_prototype >= 0)
return 0;
+ /* In this complicated case we just disable the partial_nregs code. */
+ if (rs6000_darwin64_abi && mode == BLKmode
+ && TREE_CODE (type) == RECORD_TYPE
+ && int_size_in_bytes (type) > 0)
+ return 0;
+
align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
parm_offset = TARGET_32BIT ? 2 : 0;
align_words = cum->words + ((parm_offset - cum->words) & align);
@@ -5716,16 +6212,16 @@ rs6000_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
&& align_words >= GP_ARG_NUM_REG))))
{
if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3) > FP_ARG_MAX_REG + 1)
- ret = FP_ARG_MAX_REG + 1 - cum->fregno;
+ /* APPLE LOCAL mainline 2005-04-21 */
+ ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
else if (cum->nargs_prototype >= 0)
return 0;
}
if (align_words < GP_ARG_NUM_REG
&& GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
- ret = GP_ARG_NUM_REG - align_words;
-
- ret *= (TARGET_32BIT ? 4 : 8);
+ /* APPLE LOCAL mainline 2005-04-21 */
+ ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
if (ret != 0 && TARGET_DEBUG_ARG)
fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
@@ -5926,6 +6422,32 @@ setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
}
}
+/* APPLE LOCAL begin Altivec */
+/* This routine determines whether an extra pass over the argument list
+   is needed for vector arguments.  It returns true if the current
+   argument should be skipped, which depends on whether we are in the
+   first pass (to skip vectors) or the second pass (to skip
+   non-vectors).  */
+
+static bool
+skip_vec_args (tree arg_type, int pass, int *last_pass)
+{
+ if (DEFAULT_ABI != ABI_DARWIN)
+ return false;
+
+ if (TREE_CODE (arg_type) == VECTOR_TYPE)
+ {
+ *last_pass = 2;
+ if (pass == 1)
+ return true;
+ }
+ else if (pass == 2)
+ return true;
+ return false;
+}
+/* APPLE LOCAL end Altivec */
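A sketch of the two-pass caller this helper assumes; the real loop lives in the generic argument-layout code, and the function name here is hypothetical:

    /* Sketch: pass 1 skips vectors (and extends the scan to pass 2);
       pass 2 then skips everything that is not a vector.  Assumes a
       chain of PARM_DECLs.  */
    static void
    sketch_scan_args (tree args)
    {
      int pass, last_pass = 1;
      tree a;
      for (pass = 1; pass <= last_pass; pass++)
        for (a = args; a; a = TREE_CHAIN (a))
          {
            if (skip_vec_args (TREE_TYPE (a), pass, &last_pass))
              continue;
            /* ... lay out argument A ... */
          }
    }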
+
+
/* Create the va_list data type. */
static tree
@@ -6217,12 +6739,117 @@ rs6000_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
/* Builtins. */
+/* APPLE LOCAL begin Altivec */
#define def_builtin(MASK, NAME, TYPE, CODE) \
do { \
if ((MASK) & target_flags) \
- lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
+ vector_builtin_fns[(CODE)] = lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
NULL, NULL_TREE); \
} while (0)
+/* APPLE LOCAL end Altivec */
+
+/* APPLE LOCAL begin AltiVec */
+/* The AltiVec PIM operations and predicates (used in Apple AltiVec mode)
+ are stored in ALTIVEC_PIM_TABLE below, each annotated with flags indicating
+ how its arguments should be matched and/or how its return type is to be
+ determined. */
+
+enum pim_flags
+{
+ /* CR6 predicate modifiers. Not used for operations. For predicates,
+ one of the following four values shall be prepended to the argument
+ list as an INTEGER_CST. */
+
+ pim_cr6_eq = 0, /* __CR6_EQ */
+ pim_cr6_ne = 1, /* __CR6_EQ_REV */
+ pim_cr6_lt = 2, /* __CR6_LT */
+ pim_cr6_ge = 3, /* __CR6_LT_REV */
+ pim_cr6_MASK = pim_cr6_eq | pim_cr6_ne | pim_cr6_lt | pim_cr6_ge,
+
+ /* Function overload argument matching. Operations and predicates with
+ multiple overload candidates will have multiple entries, listed
+ contiguously, in the ALTIVEC_PIM_TABLE below. When the
+ rs6000_fold_builtin() routine is called, it will first point at
+ the first entry. If any of the pim_ovl_... flags is set for this
+ entry, the argument(s) to rs6000_fold_builtin() will be type-checked
+ accordingly. If the check succeeds, the current entry will be
+ used to rewrite the PIM instruction into a __builtin instruction;
+ if the check fails, the next entry in ALTIVEC_PIM_TABLE is selected
+ and the pim_ovl_... type comparison is made again. */
+
+ pim_ovl_16 = 4, /* First argument must be a 16-element vector */
+ pim_ovl_16u = 8,
+ pim_ovl_8 = 12, /* First argument must be an 8-element vector */
+ pim_ovl_8u = 16,
+ pim_ovl_8p = 20, /* First argument must be a vector pixel */
+ pim_ovl_4 = 24, /* First argument must be a 4-element vector */
+ pim_ovl_4u = 28,
+ pim_ovl_4f = 32, /* First argument must be a vector float */
+ pim_ovl_16u_16u = 36, /* First two args must be unsigned 16-el vectors */
+ pim_ovl_8u_8u = 40,
+ pim_ovl_4u_4u = 44,
+ pim_ovl_pqi_2 = 48, /* Second argument must be a pointer to QI. */
+ pim_ovl_phi_2 = 52, /* Second argument must be a pointer to HI. */
+ pim_ovl_psi_2 = 56, /* Second argument must be a pointer to SI. */
+ pim_ovl_MASK = pim_ovl_16 | pim_ovl_16u | pim_ovl_8 | pim_ovl_8u
+ | pim_ovl_8p | pim_ovl_4 | pim_ovl_4u | pim_ovl_4f
+ | pim_ovl_16u_16u | pim_ovl_8u_8u | pim_ovl_4u_4u
+ | pim_ovl_pqi_2 | pim_ovl_phi_2 | pim_ovl_psi_2,
+
+   /* Return type computation.  For most operations/predicates the
+      return type is fixed and stored directly in ALTIVEC_PIM_TABLE;
+      for the rest it is a function of the argument types supplied,
+      as indicated by the flags below.  */
+
+ pim_rt_12 = 512, /* Covariant with first two arguments. */
+ pim_rt_2p = 1024, /* Covariant with pointee of second argument. */
+ pim_rt_1 = 1536, /* Covariant with first argument only. */
+ pim_rt_1d = 2048, /* Double the vector element size of first arg. */
+ pim_rt_1h = 2560, /* Halve the vector element size of first arg. */
+ pim_rt_MASK = pim_rt_12 | pim_rt_2p | pim_rt_1 | pim_rt_1d | pim_rt_1h,
+
+ /* Argument manipulation. Before the __builtin instructions are called,
+ the arguments may need to be rearranged. In addition, for all
+ predicates, one of the CR6 values will be prepended to the argument
+ list (see pim_cr6_... above). */
+
+ pim_manip_swap = 8192, /* Swap the first two arguments. */
+ pim_manip_dup = 16384, /* Duplicate first argument. */
+ pim_manip_MASK = pim_manip_swap | pim_manip_dup,
+
+ /* Mark the beginning of instruction groups. For our purposes, an
+ instruction group is the collection of overload candidates for
+ a particular instruction or predicate. For example, the entries
+ "vec_abss", "vec_abss.2" and "vec_abss.3" defined in
+ altivec_init_builtins() below constitute a group, as does the
+ singleton "vec_addc" entry. */
+
+ pim_group = 32768
+};
+
+struct altivec_pim_info GTY(())
+{
+ tree rettype; /* Return type (unless pim_rt_... flags are used). */
+  int insn;		/* DECL_FUNCTION_CODE of the underlying '__builtin_...'.  */
+ enum pim_flags flags; /* See 'enum pim_flags' above. */
+};
+
+static GTY(()) struct altivec_pim_info
+altivec_pim_table[ALTIVEC_PIM__LAST - ALTIVEC_PIM__FIRST + 1];
+
+#define def_pim_builtin(NAME, TYPE, INSN, FLAGS) \
+do { \
+ lang_hooks.builtin_function (NAME, int_ftype_ellipsis, pim_code, \
+ BUILT_IN_MD, NULL, NULL_TREE); \
+ \
+ altivec_pim_table[pim_code - ALTIVEC_PIM__FIRST].rettype = TYPE; \
+ altivec_pim_table[pim_code - ALTIVEC_PIM__FIRST].insn \
+ = ALTIVEC_BUILTIN_##INSN; \
+ altivec_pim_table[pim_code - ALTIVEC_PIM__FIRST].flags = FLAGS; \
+ \
+ ++pim_code; \
+} while (0)
+/* APPLE LOCAL end AltiVec */
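Hedged examples of how def_pim_builtin might be invoked from altivec_init_builtins(); the actual entries are outside this hunk, and the specific names and flag combinations here are illustrative guesses:

    /* Fixed return type: vec_addc always yields vector unsigned int.  */
    def_pim_builtin ("vec_addc", unsigned_V4SI_type_node, VADDCUW, pim_group);
    /* Covariant return type: result type computed from the operands.  */
    def_pim_builtin ("vec_add", NULL_TREE, VADDUBM,
                     pim_group | pim_ovl_16 | pim_rt_12);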
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
@@ -6355,12 +6982,14 @@ static struct builtin_description bdesc_2arg[] =
{ MASK_ALTIVEC, CODE_FOR_altivec_vspltb, "__builtin_altivec_vspltb", ALTIVEC_BUILTIN_VSPLTB },
{ MASK_ALTIVEC, CODE_FOR_altivec_vsplth, "__builtin_altivec_vsplth", ALTIVEC_BUILTIN_VSPLTH },
{ MASK_ALTIVEC, CODE_FOR_altivec_vspltw, "__builtin_altivec_vspltw", ALTIVEC_BUILTIN_VSPLTW },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsrb, "__builtin_altivec_vsrb", ALTIVEC_BUILTIN_VSRB },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsrh, "__builtin_altivec_vsrh", ALTIVEC_BUILTIN_VSRH },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsrw, "__builtin_altivec_vsrw", ALTIVEC_BUILTIN_VSRW },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsrab, "__builtin_altivec_vsrab", ALTIVEC_BUILTIN_VSRAB },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsrah, "__builtin_altivec_vsrah", ALTIVEC_BUILTIN_VSRAH },
- { MASK_ALTIVEC, CODE_FOR_altivec_vsraw, "__builtin_altivec_vsraw", ALTIVEC_BUILTIN_VSRAW },
+ /* APPLE LOCAL begin mainline 2005-04-05 3972515 */
+ { MASK_ALTIVEC, CODE_FOR_lshrv16qi3, "__builtin_altivec_vsrb", ALTIVEC_BUILTIN_VSRB },
+ { MASK_ALTIVEC, CODE_FOR_lshrv8hi3, "__builtin_altivec_vsrh", ALTIVEC_BUILTIN_VSRH },
+ { MASK_ALTIVEC, CODE_FOR_lshrv4si3, "__builtin_altivec_vsrw", ALTIVEC_BUILTIN_VSRW },
+ { MASK_ALTIVEC, CODE_FOR_ashrv16qi3, "__builtin_altivec_vsrab", ALTIVEC_BUILTIN_VSRAB },
+ { MASK_ALTIVEC, CODE_FOR_ashrv8hi3, "__builtin_altivec_vsrah", ALTIVEC_BUILTIN_VSRAH },
+ { MASK_ALTIVEC, CODE_FOR_ashrv4si3, "__builtin_altivec_vsraw", ALTIVEC_BUILTIN_VSRAW },
+ /* APPLE LOCAL end mainline 2005-04-05 3972515 */
{ MASK_ALTIVEC, CODE_FOR_altivec_vsr, "__builtin_altivec_vsr", ALTIVEC_BUILTIN_VSR },
{ MASK_ALTIVEC, CODE_FOR_altivec_vsro, "__builtin_altivec_vsro", ALTIVEC_BUILTIN_VSRO },
{ MASK_ALTIVEC, CODE_FOR_subv16qi3, "__builtin_altivec_vsububm", ALTIVEC_BUILTIN_VSUBUBM },
@@ -6661,6 +7290,382 @@ static struct builtin_description bdesc_1arg[] =
{ 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW },
};
+/* APPLE LOCAL begin AltiVec */
+/* Determine the return type from types T1 and T2 of the first two arguments.
+ This is required for some of the AltiVec PIM operations/predicates. */
+
+static tree
+altivec_cov_rt_12 (tree t1, tree t2)
+{
+ /* NB: The ordering of the following statements is important.
+ Matching of more specific types (e.g., 'vector pixel') should
+ precede matching of more general types, esp. if they subsume the
+ former (e.g., 'vector of 8 elements'). */
+
+#define RETURN_IF_EITHER_IS(TYPE) if (t1 == TYPE || t2 == TYPE) return TYPE
+
+ RETURN_IF_EITHER_IS (unsigned_V16QI_type_node);
+ RETURN_IF_EITHER_IS (V16QI_type_node);
+ RETURN_IF_EITHER_IS (bool_V16QI_type_node);
+ RETURN_IF_EITHER_IS (unsigned_V8HI_type_node);
+ RETURN_IF_EITHER_IS (pixel_V8HI_type_node);
+ RETURN_IF_EITHER_IS (V8HI_type_node);
+ RETURN_IF_EITHER_IS (bool_V8HI_type_node);
+ RETURN_IF_EITHER_IS (unsigned_V4SI_type_node);
+ RETURN_IF_EITHER_IS (V4SF_type_node);
+ RETURN_IF_EITHER_IS (V4SI_type_node);
+ RETURN_IF_EITHER_IS (bool_V4SI_type_node);
+
+#undef RETURN_IF_EITHER_IS
+
+ return NULL_TREE;
+}
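A small sketch of the covariance rules encoded above; the calls are shown for illustration only, in a hypothetical helper:

    static void
    sketch_cov_rt (void)
    {
      /* 'vector float' + 'vector bool int' -> 'vector float'.  */
      tree r1 = altivec_cov_rt_12 (V4SF_type_node, bool_V4SI_type_node);
      /* 'vector bool short' + 'vector unsigned short' -> the unsigned
         type, per the most-specific-first ordering in the helper.  */
      tree r2 = altivec_cov_rt_12 (bool_V8HI_type_node, unsigned_V8HI_type_node);
      /* Element doubling: 16 chars -> 8 shorts (see altivec_cov_rt_1d).  */
      tree r3 = altivec_cov_rt_1d (V16QI_type_node);
      (void) r1; (void) r2; (void) r3;
    }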
+
+/* Determine the return type from the pointee type of argument type T.
+ This is required for some of the AltiVec PIM operations/predicates. */
+
+static tree
+altivec_cov_rt_2p (tree t)
+{
+ /* Must be a pointer. */
+
+ if (TREE_CODE (t) != POINTER_TYPE)
+ return NULL_TREE;
+
+ t = TYPE_MAIN_VARIANT (TREE_TYPE (t));
+
+ /* For pointers to vectors, the return type is the vector itself. */
+
+ if (TREE_CODE (t) == VECTOR_TYPE)
+ return t;
+
+ switch (TYPE_MODE (t))
+ {
+ case QImode:
+ return TYPE_UNSIGNED (t) ? unsigned_V16QI_type_node : V16QI_type_node;
+
+ case HImode:
+ return TYPE_UNSIGNED (t) ? unsigned_V8HI_type_node : V8HI_type_node;
+
+ case SImode:
+ return TYPE_UNSIGNED (t) ? unsigned_V4SI_type_node : V4SI_type_node;
+
+ case SFmode:
+ return V4SF_type_node;
+
+ default:
+ return NULL_TREE;
+ }
+}
+
+/* Determine the return type from type T by doubling the size of its
+ constituent vector elements. This is required for some of the AltiVec
+ PIM operations/predicates. */
+
+static tree
+altivec_cov_rt_1d (tree t)
+{
+ if (t == V16QI_type_node)
+ return V8HI_type_node;
+ else if (t == unsigned_V16QI_type_node)
+ return unsigned_V8HI_type_node;
+ else if (t == bool_V16QI_type_node)
+ return bool_V8HI_type_node;
+ else if (t == V8HI_type_node)
+ return V4SI_type_node;
+ else if (t == unsigned_V8HI_type_node || t == pixel_V8HI_type_node)
+ return unsigned_V4SI_type_node;
+ else if (t == bool_V8HI_type_node)
+ return bool_V4SI_type_node;
+ else
+ return NULL_TREE; /* Invalid argument. */
+}
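/* Editor's sketch, not part of this patch: element-size doubling
   (pim_rt_1d), as used by the unpack operations.  */
vector signed short
example_cov_rt_1d (vector signed char a)
{
  return vec_unpackh (a);   /* V16QI argument -> V8HI result.  */
}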
+
+/* Determine the return type from type T by halving the size of its
+ constituent vector elements. This is required for some of the AltiVec
+ PIM operations/predicates. */
+
+static tree
+altivec_cov_rt_1h (tree t)
+{
+ if (t == V8HI_type_node)
+ return V16QI_type_node;
+ else if (t == unsigned_V8HI_type_node || t == pixel_V8HI_type_node)
+ return unsigned_V16QI_type_node;
+ else if (t == bool_V8HI_type_node)
+ return bool_V16QI_type_node;
+ else if (t == V4SI_type_node)
+ return V8HI_type_node;
+ else if (t == unsigned_V4SI_type_node)
+ return unsigned_V8HI_type_node;
+ else if (t == bool_V4SI_type_node)
+ return bool_V8HI_type_node;
+ else
+ return NULL_TREE; /* Invalid argument. */
+}
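/* Editor's sketch, not part of this patch: element-size halving
   (pim_rt_1h), as used by the pack operations.  */
vector signed char
example_cov_rt_1h (vector signed short a, vector signed short b)
{
  return vec_pack (a, b);   /* V8HI arguments -> V16QI result.  */
}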
+
+/* Given the types T1 and T2 of the first two arguments, and INFO pointing
+ at the first of the available overload candidates (in ALTIVEC_PIM_TABLE)
+ for an AltiVec PIM operation or predicate, select a suitable overload
+ candidate by incrementing and returning INFO as appropriate. If no
+ candidate matches, return NULL. */
+
+static struct altivec_pim_info *
+altivec_ovl_resolve (struct altivec_pim_info *info, tree t1, tree t2)
+{
+ /* Make sure we have all the types that we need. */
+ if (!t1 || (!t2 && (info->flags & pim_ovl_MASK) >= pim_ovl_16u_16u))
+ return 0;
+
+ /* Examine overload candidates in order, and return the first one
+ that matches. For this scheme to work, overload candidates must
+ be ordered from most to least type-specific. */
+ do
+ {
+ switch (info->flags & pim_ovl_MASK)
+ {
+
+#define OVL_MATCH(EXPR) if (EXPR) return info; break
+
+ case pim_ovl_16:
+ OVL_MATCH (TYPE_MODE (t1) == V16QImode);
+
+ case pim_ovl_16u:
+ OVL_MATCH (TYPE_MODE (t1) == V16QImode && TYPE_UNSIGNED (t1));
+
+ case pim_ovl_8:
+ OVL_MATCH (TYPE_MODE (t1) == V8HImode);
+
+ case pim_ovl_8u:
+ OVL_MATCH (TYPE_MODE (t1) == V8HImode && TYPE_UNSIGNED (t1));
+
+ case pim_ovl_8p:
+ OVL_MATCH (t1 == pixel_V8HI_type_node);
+
+ case pim_ovl_4:
+ OVL_MATCH (TYPE_MODE (t1) == V4SImode || TYPE_MODE (t1) == V4SFmode);
+
+ case pim_ovl_4u:
+ OVL_MATCH (TYPE_MODE (t1) == V4SImode && TYPE_UNSIGNED (t1));
+
+ case pim_ovl_4f:
+ OVL_MATCH (TYPE_MODE (t1) == V4SFmode);
+
+ case pim_ovl_16u_16u:
+ OVL_MATCH (t1 == unsigned_V16QI_type_node
+ || t2 == unsigned_V16QI_type_node);
+
+ case pim_ovl_8u_8u:
+ OVL_MATCH (t1 == unsigned_V8HI_type_node
+ || t1 == pixel_V8HI_type_node
+ || t2 == unsigned_V8HI_type_node
+ || t2 == pixel_V8HI_type_node);
+
+ case pim_ovl_4u_4u:
+ OVL_MATCH (t1 == unsigned_V4SI_type_node
+ || t2 == unsigned_V4SI_type_node);
+
+ case pim_ovl_pqi_2:
+ OVL_MATCH (TREE_CODE (t2) == POINTER_TYPE
+ && (TYPE_MODE (TREE_TYPE (t2)) == QImode
+ || TYPE_MODE (TREE_TYPE (t2)) == V16QImode));
+
+ case pim_ovl_phi_2:
+ OVL_MATCH (TREE_CODE (t2) == POINTER_TYPE
+ && (TYPE_MODE (TREE_TYPE (t2)) == HImode
+ || TYPE_MODE (TREE_TYPE (t2)) == V8HImode));
+
+ case pim_ovl_psi_2:
+ OVL_MATCH (TREE_CODE (t2) == POINTER_TYPE
+ && (TYPE_MODE (TREE_TYPE (t2)) == SImode
+ || TYPE_MODE (TREE_TYPE (t2)) == V4SImode
+ || TYPE_MODE (TREE_TYPE (t2)) == SFmode
+ || TYPE_MODE (TREE_TYPE (t2)) == V4SFmode));
+
+ default: /* Catch-all. */
+ return info;
+
+#undef OVL_MATCH
+ }
+ }
+ while (!((++info)->flags & pim_group)); /* Advance to next candidate. */
+
+ return NULL; /* No suitable overload candidate found. */
+}
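/* Editor's sketch, not part of this patch: why candidate order matters.
   For "vec_cmpgt" the table below lists the unsigned V16QI candidate
   (pim_ovl_16u) ahead of the generic V16QI one (pim_ovl_16), so
   unsigned operands resolve to VCMPGTUB rather than VCMPGTSB.  */
vector bool char
example_ovl_resolve (vector unsigned char a, vector unsigned char b)
{
  return vec_cmpgt (a, b);   /* Matches the pim_ovl_16u candidate.  */
}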
+
+/* Convert each function argument in the ARGS list into the corresponding
+ type found in the TYPES list. This must be done before calling the
+ __builtin_... AltiVec functions, whose declared argument types may differ
+ from what was passed to rs6000_fold_builtin(). */
+
+static tree
+altivec_convert_args (tree types, tree args)
+{
+ tree t, a;
+
+ for (t = types, a = args; t && a; t = TREE_CHAIN (t), a = TREE_CHAIN (a))
+ {
+ TREE_VALUE (a) = convert (TREE_VALUE (t), TREE_VALUE (a));
+
+ /* Clear any overflow flags, so that gimplification does not
+ create temporary variables for the converted constants. */
+ if (TREE_CODE (TREE_VALUE (a)) == INTEGER_CST)
+ {
+ TREE_OVERFLOW (TREE_VALUE (a)) = 0;
+ TREE_CONSTANT_OVERFLOW (TREE_VALUE (a)) = 0;
+ }
+ }
+
+ return args;
+}
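/* Editor's sketch, not part of this patch: the effect of the argument
   conversion.  Assuming __builtin_altivec_vadduwm is declared with
   'vector signed int' parameters, the call below is rewritten roughly as
     (vector unsigned int) __builtin_altivec_vadduwm
        ((vector signed int) a, (vector signed int) b);  */
vector unsigned int
example_convert_args (vector unsigned int a, vector unsigned int b)
{
  return vec_add (a, b);
}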
+
+/* The following function rewrites EXP by replacing AltiVec PIM operations
+ or predicates with calls to the __builtin_... functions defined above.
+ Type casts are provided if needed. */
+
+tree
+rs6000_fold_builtin (tree exp, bool ARG_UNUSED (ignore))
+{
+ tree fndecl, arglist, rettype;
+ tree typ1 = NULL_TREE, typ2 = NULL_TREE;
+ int fcode, ovl_error = 0;
+ struct altivec_pim_info *info;
+
+ /* Bail out if not in Apple AltiVec mode. */
+ if (!rs6000_altivec_pim)
+ return NULL_TREE;
+
+ fndecl = get_callee_fndecl (exp);
+ fcode = DECL_FUNCTION_CODE (fndecl);
+
+ /* Bail out unless we are looking at one of the AltiVec PIM
+ operations/predicates. */
+
+ if (fcode < ALTIVEC_PIM__FIRST || fcode > ALTIVEC_PIM__LAST)
+ return NULL_TREE;
+
+ /* Point at the first (and possibly only) entry in ALTIVEC_PIM_TABLE
+ describing this PIM operation/predicate, and how to convert it to
+ a __builtin_... call. */
+
+ info = altivec_pim_table + (fcode - ALTIVEC_PIM__FIRST);
+
+ /* Separate out the argument types for further analysis. */
+
+ arglist = TREE_OPERAND (exp, 1);
+
+ if (arglist)
+ typ1 = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_VALUE (arglist)));
+
+ if (arglist && TREE_CHAIN (arglist))
+ typ2 = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist))));
+
+ /* Select from a list of overloaded functions, if needed. */
+
+ if (info->flags & pim_ovl_MASK)
+ {
+ info = altivec_ovl_resolve (info, typ1, typ2);
+
+ if (!info)
+ {
+ /* No suitable overload candidate was found! */
+ ovl_error = 1; /* Triggers a diagnostic below. */
+ /* Point at the first overload candidate again. */
+ info = altivec_pim_table + (fcode - ALTIVEC_PIM__FIRST);
+ }
+ }
+
+ /* Determine the return type. */
+
+ switch (info->flags & pim_rt_MASK)
+ {
+ /* Return type is covariant with the first two arguments. */
+ case pim_rt_12:
+ rettype = altivec_cov_rt_12 (typ1, typ2);
+ break;
+
+ /* Return type is covariant with pointee of second argument. */
+ case pim_rt_2p:
+ rettype = altivec_cov_rt_2p (typ2);
+ break;
+
+ /* Return type is covariant with the first argument only. */
+ case pim_rt_1:
+ rettype = typ1;
+ break;
+
+ /* Return type is covariant with first argument, but with doubled
+ vector element sizes. */
+ case pim_rt_1d:
+ rettype = altivec_cov_rt_1d (typ1);
+ break;
+
+ /* Return type is covariant with first argument, but with halved
+ vector element sizes. */
+ case pim_rt_1h:
+ rettype = altivec_cov_rt_1h (typ1);
+ break;
+
+ default:
+ /* Retrieve return type to use from ALTIVEC_PIM_TABLE. */
+ rettype = info->rettype;
+ }
+
+ /* Rearrange arguments, as needed. */
+
+ switch (info->flags & pim_manip_MASK)
+ {
+ case pim_manip_swap:
+ if (!typ1 || !typ2)
+ rettype = NULL_TREE;
+ else
+ {
+ tree swap = TREE_VALUE (arglist);
+
+ TREE_VALUE (arglist) = TREE_VALUE (TREE_CHAIN (arglist));
+ TREE_VALUE (TREE_CHAIN (arglist)) = swap;
+ }
+
+ break;
+
+ case pim_manip_dup:
+ if (!typ1 || typ2)
+ rettype = NULL_TREE;
+ else
+ TREE_CHAIN (arglist) = tree_cons (NULL_TREE, TREE_VALUE (arglist),
+ NULL_TREE);
+
+ break;
+ }
+
+ /* For predicates, prepend the proper CR6 value to the argument list. */
+
+ if (fcode >= ALTIVEC_PIM_VEC_ALL_EQ)
+ arglist = tree_cons (NULL_TREE,
+ build_int_cst (NULL_TREE, info->flags & pim_cr6_MASK),
+ arglist);
+
+ /* If we could not properly determine an overload candidate or a return type,
+ issue an error. */
+
+ if (ovl_error || !rettype)
+ {
+ error ("invalid argument(s) for AltiVec operation or predicate");
+ /* Use the return type of the first overload candidate, if one
+ has been provided. Otherwise, fall back on 'vector signed int'. */
+ rettype = info->rettype ? info->rettype : V4SI_type_node;
+ }
+
+ /* Retrieve the underlying AltiVec __builtin_... to call, and call it. */
+
+ fndecl = vector_builtin_fns [info->insn];
+ arglist = altivec_convert_args (TYPE_ARG_TYPES (TREE_TYPE (fndecl)),
+ arglist);
+
+ return convert (rettype, build_function_call_expr (fndecl, arglist));
+}
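/* Editor's sketch, not part of this patch: two complete rewrites
   performed by rs6000_fold_builtin.  'vec_cmplt' has no instruction of
   its own, so pim_manip_swap exchanges the operands and VCMPGTSW is
   used instead; predicates such as 'vec_all_eq' additionally get their
   CR6 selector constant prepended to the argument list.  */
int
example_fold_builtin (vector signed int a, vector signed int b)
{
  vector bool int lt = vec_cmplt (a, b);   /* -> vcmpgtsw with b, a.  */
  return vec_all_eq (lt, lt);              /* -> vcmpequw. + CR6 test.  */
}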
+/* APPLE LOCAL end AltiVec */
+
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree arglist, rtx target)
{
@@ -7924,6 +8929,12 @@ rs6000_init_builtins (void)
altivec_init_builtins ();
if (TARGET_ALTIVEC || TARGET_SPE)
rs6000_common_init_builtins ();
+
+ /* APPLE LOCAL begin constant cfstrings */
+#ifdef SUBTARGET_INIT_BUILTINS
+ SUBTARGET_INIT_BUILTINS;
+#endif
+ /* APPLE LOCAL end constant cfstrings */
}
/* Search through a set of builtins and enable the mask bits.
@@ -8392,6 +9403,716 @@ altivec_init_builtins (void)
/* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
altivec_builtin_mask_for_load = decl;
}
+
+ /* APPLE LOCAL begin AltiVec */
+ /* If Apple AltiVec is enabled, we need to define additional builtins
+ in lieu of what <altivec.h> provides for FSF AltiVec. */
+ if (rs6000_altivec_pim)
+ {
+ tree int_ftype_ellipsis = build_function_type (integer_type_node,
+ NULL_TREE);
+ int pim_code = ALTIVEC_PIM__FIRST;
+
+ /* NB: For overloaded operations/predicates, the pim_... flags specify
+ how to match up the argument types and how to determine the
+ return type, if necessary; the rs6000_fold_builtin() routine
+ does all this. */
+
+ /* PIM Operations. */
+
+ gcc_assert (pim_code == ALTIVEC_PIM_VEC_ABS);
+
+ def_pim_builtin ("vec_abs", V16QI_type_node, ABS_V16QI, pim_ovl_16 | pim_group);
+ def_pim_builtin ("vec_abs.2", V8HI_type_node, ABS_V8HI, pim_ovl_8);
+ def_pim_builtin ("vec_abs.3", V4SF_type_node, ABS_V4SF, pim_ovl_4f);
+ def_pim_builtin ("vec_abs.4", V4SI_type_node, ABS_V4SI, pim_ovl_4);
+
+ def_pim_builtin ("vec_abss", V16QI_type_node, ABSS_V16QI, pim_ovl_16 | pim_group);
+ def_pim_builtin ("vec_abss.2", V8HI_type_node, ABSS_V8HI, pim_ovl_8);
+ def_pim_builtin ("vec_abss.3", V4SI_type_node, ABSS_V4SI, pim_ovl_4);
+
+ def_pim_builtin ("vec_add", NULL_TREE, VADDUBM, pim_ovl_16 | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_add.2", NULL_TREE, VADDUHM, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_add.3", V4SF_type_node, VADDFP, pim_ovl_4f);
+ def_pim_builtin ("vec_add.4", NULL_TREE, VADDUWM, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_addc", unsigned_V4SI_type_node, VADDCUW, pim_group);
+
+ def_pim_builtin ("vec_adds", NULL_TREE, VADDUBS, pim_ovl_16u_16u | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_adds.2", NULL_TREE, VADDSBS, pim_ovl_16 | pim_rt_12);
+ def_pim_builtin ("vec_adds.3", NULL_TREE, VADDUHS, pim_ovl_8u_8u | pim_rt_12);
+ def_pim_builtin ("vec_adds.4", NULL_TREE, VADDSHS, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_adds.5", NULL_TREE, VADDUWS, pim_ovl_4u_4u | pim_rt_12);
+ def_pim_builtin ("vec_adds.6", NULL_TREE, VADDSWS, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_and", NULL_TREE, VAND, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_andc", NULL_TREE, VANDC, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_avg", NULL_TREE, VAVGUB, pim_ovl_16u | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_avg.2", NULL_TREE, VAVGSB, pim_ovl_16 | pim_rt_12);
+ def_pim_builtin ("vec_avg.3", NULL_TREE, VAVGUH, pim_ovl_8u | pim_rt_12);
+ def_pim_builtin ("vec_avg.4", NULL_TREE, VAVGSH, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_avg.5", NULL_TREE, VAVGUW, pim_ovl_4u | pim_rt_12);
+ def_pim_builtin ("vec_avg.6", NULL_TREE, VAVGSW, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_ceil", V4SF_type_node, VRFIP, pim_group);
+
+ def_pim_builtin ("vec_cmpb", V4SI_type_node, VCMPBFP, pim_group);
+
+ def_pim_builtin ("vec_cmpeq", bool_V16QI_type_node, VCMPEQUB, pim_ovl_16 | pim_group);
+ def_pim_builtin ("vec_cmpeq.2", bool_V8HI_type_node, VCMPEQUH, pim_ovl_8);
+ def_pim_builtin ("vec_cmpeq.3", bool_V4SI_type_node, VCMPEQFP, pim_ovl_4f);
+ def_pim_builtin ("vec_cmpeq.4", bool_V4SI_type_node, VCMPEQUW, pim_ovl_4);
+
+ def_pim_builtin ("vec_cmpge", bool_V4SI_type_node, VCMPGEFP, pim_group);
+
+ def_pim_builtin ("vec_cmpgt", bool_V16QI_type_node, VCMPGTUB, pim_ovl_16u | pim_group);
+ def_pim_builtin ("vec_cmpgt.2", bool_V16QI_type_node, VCMPGTSB, pim_ovl_16);
+ def_pim_builtin ("vec_cmpgt.3", bool_V8HI_type_node, VCMPGTUH, pim_ovl_8u);
+ def_pim_builtin ("vec_cmpgt.4", bool_V8HI_type_node, VCMPGTSH, pim_ovl_8);
+ def_pim_builtin ("vec_cmpgt.5", bool_V4SI_type_node, VCMPGTFP, pim_ovl_4f);
+ def_pim_builtin ("vec_cmpgt.6", bool_V4SI_type_node, VCMPGTUW, pim_ovl_4u);
+ def_pim_builtin ("vec_cmpgt.7", bool_V4SI_type_node, VCMPGTSW, pim_ovl_4);
+
+ def_pim_builtin ("vec_cmple", bool_V4SI_type_node, VCMPGEFP, pim_manip_swap | pim_group);
+
+ def_pim_builtin ("vec_cmplt", bool_V16QI_type_node, VCMPGTUB, pim_ovl_16u | pim_manip_swap | pim_group);
+ def_pim_builtin ("vec_cmplt.2", bool_V16QI_type_node, VCMPGTSB, pim_ovl_16 | pim_manip_swap);
+ def_pim_builtin ("vec_cmplt.3", bool_V8HI_type_node, VCMPGTUH, pim_ovl_8u | pim_manip_swap);
+ def_pim_builtin ("vec_cmplt.4", bool_V8HI_type_node, VCMPGTSH, pim_ovl_8 | pim_manip_swap);
+ def_pim_builtin ("vec_cmplt.5", bool_V4SI_type_node, VCMPGTFP, pim_ovl_4f | pim_manip_swap);
+ def_pim_builtin ("vec_cmplt.6", bool_V4SI_type_node, VCMPGTUW, pim_ovl_4u | pim_manip_swap);
+ def_pim_builtin ("vec_cmplt.7", bool_V4SI_type_node, VCMPGTSW, pim_ovl_4 | pim_manip_swap);
+
+ def_pim_builtin ("vec_ctf", V4SF_type_node, VCFUX, pim_ovl_4u | pim_group);
+ def_pim_builtin ("vec_ctf.2", V4SF_type_node, VCFSX, pim_ovl_4);
+
+ def_pim_builtin ("vec_cts", V4SI_type_node, VCTSXS, pim_ovl_4f | pim_group);
+
+ def_pim_builtin ("vec_ctu", unsigned_V4SI_type_node, VCTUXS, pim_ovl_4f | pim_group);
+
+ def_pim_builtin ("vec_dss", void_type_node, DSS, pim_group);
+
+ def_pim_builtin ("vec_dssall", void_type_node, DSSALL, pim_group);
+
+ def_pim_builtin ("vec_dst", void_type_node, DST, pim_group);
+
+ def_pim_builtin ("vec_dstst", void_type_node, DSTST, pim_group);
+
+ def_pim_builtin ("vec_dststt", void_type_node, DSTSTT, pim_group);
+
+ def_pim_builtin ("vec_dstt", void_type_node, DSTT, pim_group);
+
+ def_pim_builtin ("vec_expte", V4SF_type_node, VEXPTEFP, pim_group);
+
+ def_pim_builtin ("vec_floor", V4SF_type_node, VRFIM, pim_group);
+
+ def_pim_builtin ("vec_ld", NULL_TREE, LVX, pim_rt_2p | pim_group);
+
+ def_pim_builtin ("vec_lde", NULL_TREE, LVEBX, pim_ovl_pqi_2 | pim_rt_2p | pim_group);
+ def_pim_builtin ("vec_lde.2", NULL_TREE, LVEHX, pim_ovl_phi_2 | pim_rt_2p);
+ def_pim_builtin ("vec_lde.3", NULL_TREE, LVEWX, pim_ovl_psi_2 | pim_rt_2p);
+
+ def_pim_builtin ("vec_ldl", NULL_TREE, LVXL, pim_rt_2p | pim_group);
+
+ def_pim_builtin ("vec_loge", V4SF_type_node, VLOGEFP, pim_group);
+
+ def_pim_builtin ("vec_lvebx", NULL_TREE, LVEBX, pim_rt_2p | pim_group);
+ def_pim_builtin ("vec_lvehx", NULL_TREE, LVEHX, pim_rt_2p | pim_group);
+ def_pim_builtin ("vec_lvewx", NULL_TREE, LVEWX, pim_rt_2p | pim_group);
+
+ def_pim_builtin ("vec_lvsl", unsigned_V16QI_type_node, LVSL, pim_group);
+
+ def_pim_builtin ("vec_lvsr", unsigned_V16QI_type_node, LVSR, pim_group);
+
+ def_pim_builtin ("vec_lvx", NULL_TREE, LVX, pim_rt_2p | pim_group);
+
+ def_pim_builtin ("vec_lvxl", NULL_TREE, LVXL, pim_rt_2p | pim_group);
+
+ def_pim_builtin ("vec_madd", V4SF_type_node, VMADDFP, pim_group);
+
+ def_pim_builtin ("vec_madds", V8HI_type_node, VMHADDSHS, pim_group);
+
+ def_pim_builtin ("vec_max", NULL_TREE, VMAXUB, pim_ovl_16u_16u | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_max.2", NULL_TREE, VMAXSB, pim_ovl_16 | pim_rt_12);
+ def_pim_builtin ("vec_max.3", NULL_TREE, VMAXUH, pim_ovl_8u_8u | pim_rt_12);
+ def_pim_builtin ("vec_max.4", NULL_TREE, VMAXSH, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_max.5", NULL_TREE, VMAXFP, pim_ovl_4f | pim_rt_12);
+ def_pim_builtin ("vec_max.6", NULL_TREE, VMAXUW, pim_ovl_4u_4u | pim_rt_12);
+ def_pim_builtin ("vec_max.7", NULL_TREE, VMAXSW, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_mergeh", NULL_TREE, VMRGHB, pim_ovl_16 | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_mergeh.2", NULL_TREE, VMRGHH, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_mergeh.3", NULL_TREE, VMRGHW, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_mergel", NULL_TREE, VMRGLB, pim_ovl_16 | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_mergel.2", NULL_TREE, VMRGLH, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_mergel.3", NULL_TREE, VMRGLW, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_mfvscr", unsigned_V8HI_type_node, MFVSCR, pim_group);
+
+ def_pim_builtin ("vec_min", NULL_TREE, VMINUB, pim_ovl_16u_16u | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_min.2", NULL_TREE, VMINSB, pim_ovl_16 | pim_rt_12);
+ def_pim_builtin ("vec_min.3", NULL_TREE, VMINUH, pim_ovl_8u_8u | pim_rt_12);
+ def_pim_builtin ("vec_min.4", NULL_TREE, VMINSH, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_min.5", NULL_TREE, VMINFP, pim_ovl_4f | pim_rt_12);
+ def_pim_builtin ("vec_min.6", NULL_TREE, VMINUW, pim_ovl_4u_4u | pim_rt_12);
+ def_pim_builtin ("vec_min.7", NULL_TREE, VMINSW, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_mladd", unsigned_V8HI_type_node, VMLADDUHM, pim_ovl_8u_8u | pim_group);
+ def_pim_builtin ("vec_mladd.2", V8HI_type_node, VMLADDUHM, pim_ovl_8);
+
+ def_pim_builtin ("vec_mradds", V8HI_type_node, VMHRADDSHS, pim_group);
+
+ def_pim_builtin ("vec_msum", unsigned_V4SI_type_node, VMSUMUBM, pim_ovl_16u | pim_group);
+ def_pim_builtin ("vec_msum.2", V4SI_type_node, VMSUMMBM, pim_ovl_16);
+ def_pim_builtin ("vec_msum.3", unsigned_V4SI_type_node, VMSUMUHM, pim_ovl_8u);
+ def_pim_builtin ("vec_msum.4", V4SI_type_node, VMSUMSHM, pim_ovl_8);
+
+ def_pim_builtin ("vec_msums", unsigned_V4SI_type_node, VMSUMUHS, pim_ovl_8u | pim_group);
+ def_pim_builtin ("vec_msums.2", V4SI_type_node, VMSUMSHS, pim_ovl_8);
+
+ def_pim_builtin ("vec_mtvscr", void_type_node, MTVSCR, pim_group);
+
+ def_pim_builtin ("vec_mule", unsigned_V8HI_type_node, VMULEUB, pim_ovl_16u | pim_group);
+ def_pim_builtin ("vec_mule.2", V8HI_type_node, VMULESB, pim_ovl_16);
+ def_pim_builtin ("vec_mule.3", unsigned_V4SI_type_node, VMULEUH, pim_ovl_8u);
+ def_pim_builtin ("vec_mule.4", V4SI_type_node, VMULESH, pim_ovl_8);
+
+ def_pim_builtin ("vec_mulo", unsigned_V8HI_type_node, VMULOUB, pim_ovl_16u | pim_group);
+ def_pim_builtin ("vec_mulo.2", V8HI_type_node, VMULOSB, pim_ovl_16);
+ def_pim_builtin ("vec_mulo.3", unsigned_V4SI_type_node, VMULOUH, pim_ovl_8u);
+ def_pim_builtin ("vec_mulo.4", V4SI_type_node, VMULOSH, pim_ovl_8);
+
+ def_pim_builtin ("vec_nmsub", V4SF_type_node, VNMSUBFP, pim_group);
+
+ def_pim_builtin ("vec_nor", NULL_TREE, VNOR, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_or", NULL_TREE, VOR, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_pack", NULL_TREE, VPKUHUM, pim_ovl_8 | pim_rt_1h | pim_group);
+ def_pim_builtin ("vec_pack.2", NULL_TREE, VPKUWUM, pim_ovl_4 | pim_rt_1h);
+
+ def_pim_builtin ("vec_packpx", pixel_V8HI_type_node, VPKPX, pim_group);
+
+ def_pim_builtin ("vec_packs", unsigned_V16QI_type_node, VPKUHUS, pim_ovl_8u | pim_group);
+ def_pim_builtin ("vec_packs.2", V16QI_type_node, VPKSHSS, pim_ovl_8);
+ def_pim_builtin ("vec_packs.3", unsigned_V8HI_type_node, VPKUWUS, pim_ovl_4u);
+ def_pim_builtin ("vec_packs.4", V8HI_type_node, VPKSWSS, pim_ovl_4);
+
+ def_pim_builtin ("vec_packsu", unsigned_V16QI_type_node, VPKUHUS, pim_ovl_8u | pim_group);
+ def_pim_builtin ("vec_packsu.2", unsigned_V16QI_type_node, VPKSHUS, pim_ovl_8);
+ def_pim_builtin ("vec_packsu.3", unsigned_V8HI_type_node, VPKUWUS, pim_ovl_4u);
+ def_pim_builtin ("vec_packsu.4", unsigned_V8HI_type_node, VPKSWUS, pim_ovl_4);
+
+ def_pim_builtin ("vec_perm", V16QI_type_node, VPERM_4SI, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_re", V4SF_type_node, VREFP, pim_group);
+
+ def_pim_builtin ("vec_rl", NULL_TREE, VRLB, pim_ovl_16 | pim_rt_1 | pim_group);
+ def_pim_builtin ("vec_rl.2", NULL_TREE, VRLH, pim_ovl_8 | pim_rt_1);
+ def_pim_builtin ("vec_rl.3", NULL_TREE, VRLW, pim_ovl_4 | pim_rt_1);
+
+ def_pim_builtin ("vec_round", V4SF_type_node, VRFIN, pim_group);
+
+ def_pim_builtin ("vec_rsqrte", V4SF_type_node, VRSQRTEFP, pim_group);
+
+ def_pim_builtin ("vec_sel", NULL_TREE, VSEL_4SI, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_sl", NULL_TREE, VSLB, pim_ovl_16 | pim_rt_1 | pim_group);
+ def_pim_builtin ("vec_sl.2", NULL_TREE, VSLH, pim_ovl_8 | pim_rt_1);
+ def_pim_builtin ("vec_sl.3", NULL_TREE, VSLW, pim_ovl_4 | pim_rt_1);
+
+ def_pim_builtin ("vec_sld", NULL_TREE, VSLDOI_4SI, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_sll", NULL_TREE, VSL, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_slo", NULL_TREE, VSLO, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_splat", NULL_TREE, VSPLTB, pim_ovl_16 | pim_rt_1 | pim_group);
+ def_pim_builtin ("vec_splat.2", NULL_TREE, VSPLTH, pim_ovl_8 | pim_rt_1);
+ def_pim_builtin ("vec_splat.3", NULL_TREE, VSPLTW, pim_ovl_4 | pim_rt_1);
+
+ def_pim_builtin ("vec_splat_s8", V16QI_type_node, VSPLTISB, pim_group);
+
+ def_pim_builtin ("vec_splat_s16", V8HI_type_node, VSPLTISH, pim_group);
+
+ def_pim_builtin ("vec_splat_s32", V4SI_type_node, VSPLTISW, pim_group);
+
+ def_pim_builtin ("vec_splat_u8", unsigned_V16QI_type_node, VSPLTISB, pim_group);
+
+ def_pim_builtin ("vec_splat_u16", unsigned_V8HI_type_node, VSPLTISH, pim_group);
+
+ def_pim_builtin ("vec_splat_u32", unsigned_V4SI_type_node, VSPLTISW, pim_group);
+
+ def_pim_builtin ("vec_sr", NULL_TREE, VSRB, pim_ovl_16 | pim_rt_1 | pim_group);
+ def_pim_builtin ("vec_sr.2", NULL_TREE, VSRH, pim_ovl_8 | pim_rt_1);
+ def_pim_builtin ("vec_sr.3", NULL_TREE, VSRW, pim_ovl_4 | pim_rt_1);
+
+ def_pim_builtin ("vec_sra", NULL_TREE, VSRAB, pim_ovl_16 | pim_rt_1 | pim_group);
+ def_pim_builtin ("vec_sra.2", NULL_TREE, VSRAH, pim_ovl_8 | pim_rt_1);
+ def_pim_builtin ("vec_sra.3", NULL_TREE, VSRAW, pim_ovl_4 | pim_rt_1);
+
+ def_pim_builtin ("vec_srl", NULL_TREE, VSR, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_sro", NULL_TREE, VSRO, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_st", void_type_node, STVX, pim_group);
+
+ def_pim_builtin ("vec_ste", void_type_node, STVEBX, pim_ovl_16 | pim_group);
+ def_pim_builtin ("vec_ste.2", void_type_node, STVEHX, pim_ovl_8);
+ def_pim_builtin ("vec_ste.3", void_type_node, STVEWX, pim_ovl_4);
+
+ def_pim_builtin ("vec_stl", void_type_node, STVXL, pim_group);
+
+ def_pim_builtin ("vec_stvebx", void_type_node, STVEBX, pim_group);
+ def_pim_builtin ("vec_stvehx", void_type_node, STVEHX, pim_group);
+ def_pim_builtin ("vec_stvewx", void_type_node, STVEWX, pim_group);
+
+ def_pim_builtin ("vec_stvx", void_type_node, STVX, pim_group);
+
+ def_pim_builtin ("vec_stvxl", void_type_node, STVXL, pim_group);
+
+ def_pim_builtin ("vec_sub", NULL_TREE, VSUBUBM, pim_ovl_16 | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_sub.2", NULL_TREE, VSUBUHM, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_sub.3", NULL_TREE, VSUBFP, pim_ovl_4f | pim_rt_12);
+ def_pim_builtin ("vec_sub.4", NULL_TREE, VSUBUWM, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_subc", unsigned_V4SI_type_node, VSUBCUW, pim_group);
+
+ def_pim_builtin ("vec_subs", NULL_TREE, VSUBUBS, pim_ovl_16u_16u | pim_rt_12 | pim_group);
+ def_pim_builtin ("vec_subs.2", NULL_TREE, VSUBSBS, pim_ovl_16 | pim_rt_12);
+ def_pim_builtin ("vec_subs.3", NULL_TREE, VSUBUHS, pim_ovl_8u_8u | pim_rt_12);
+ def_pim_builtin ("vec_subs.4", NULL_TREE, VSUBSHS, pim_ovl_8 | pim_rt_12);
+ def_pim_builtin ("vec_subs.5", NULL_TREE, VSUBUWS, pim_ovl_4u_4u | pim_rt_12);
+ def_pim_builtin ("vec_subs.6", NULL_TREE, VSUBSWS, pim_ovl_4 | pim_rt_12);
+
+ def_pim_builtin ("vec_sum4s", unsigned_V4SI_type_node, VSUM4UBS, pim_ovl_16u | pim_group);
+ def_pim_builtin ("vec_sum4s.2", V4SI_type_node, VSUM4SBS, pim_ovl_16);
+ def_pim_builtin ("vec_sum4s.3", V4SI_type_node, VSUM4SHS, pim_ovl_8);
+
+ def_pim_builtin ("vec_sum2s", V4SI_type_node, VSUM2SWS, pim_group);
+
+ def_pim_builtin ("vec_sums", V4SI_type_node, VSUMSWS, pim_group);
+
+ def_pim_builtin ("vec_trunc", V4SF_type_node, VRFIZ, pim_group);
+
+ def_pim_builtin ("vec_unpackh", NULL_TREE, VUPKHSB, pim_ovl_16 | pim_rt_1d | pim_group);
+ def_pim_builtin ("vec_unpackh.2", NULL_TREE, VUPKHPX, pim_ovl_8p | pim_rt_1d);
+ def_pim_builtin ("vec_unpackh.3", NULL_TREE, VUPKHSH, pim_ovl_8 | pim_rt_1d);
+
+ def_pim_builtin ("vec_unpackl", NULL_TREE, VUPKLSB, pim_ovl_16 | pim_rt_1d | pim_group);
+ def_pim_builtin ("vec_unpackl.2", NULL_TREE, VUPKLPX, pim_ovl_8p | pim_rt_1d);
+ def_pim_builtin ("vec_unpackl.3", NULL_TREE, VUPKLSH, pim_ovl_8 | pim_rt_1d);
+
+ gcc_assert (pim_code == ALTIVEC_PIM_VEC_VADDCUW);
+
+ def_pim_builtin ("vec_vaddcuw", unsigned_V4SI_type_node, VADDCUW, pim_group);
+
+ def_pim_builtin ("vec_vaddfp", V4SF_type_node, VADDFP, pim_group);
+
+ def_pim_builtin ("vec_vaddsbs", NULL_TREE, VADDSBS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vaddshs", NULL_TREE, VADDSHS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vaddsws", NULL_TREE, VADDSWS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vaddubm", NULL_TREE, VADDUBM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vaddubs", NULL_TREE, VADDUBS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vadduhm", NULL_TREE, VADDUHM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vadduhs", NULL_TREE, VADDUHS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vadduwm", NULL_TREE, VADDUWM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vadduws", NULL_TREE, VADDUWS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vand", NULL_TREE, VAND, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vandc", NULL_TREE, VANDC, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vavgsb", NULL_TREE, VAVGSB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vavgsh", NULL_TREE, VAVGSH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vavgsw", NULL_TREE, VAVGSW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vavgub", NULL_TREE, VAVGUB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vavguh", NULL_TREE, VAVGUH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vavguw", NULL_TREE, VAVGUW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vcfsx", V4SF_type_node, VCFSX, pim_group);
+
+ def_pim_builtin ("vec_vcfux", V4SF_type_node, VCFUX, pim_group);
+
+ def_pim_builtin ("vec_vcmpbfp", V4SI_type_node, VCMPBFP, pim_group);
+
+ def_pim_builtin ("vec_vcmpeqfp", bool_V4SI_type_node, VCMPEQFP, pim_group);
+
+ def_pim_builtin ("vec_vcmpequb", bool_V16QI_type_node, VCMPEQUB, pim_group);
+
+ def_pim_builtin ("vec_vcmpequh", bool_V8HI_type_node, VCMPEQUH, pim_group);
+
+ def_pim_builtin ("vec_vcmpequw", bool_V4SI_type_node, VCMPEQUW, pim_group);
+
+ def_pim_builtin ("vec_vcmpgefp", bool_V4SI_type_node, VCMPGEFP, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtfp", bool_V4SI_type_node, VCMPGTFP, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtsb", bool_V16QI_type_node, VCMPGTSB, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtsh", bool_V8HI_type_node, VCMPGTSH, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtsw", bool_V4SI_type_node, VCMPGTSW, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtub", bool_V16QI_type_node, VCMPGTUB, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtuh", bool_V8HI_type_node, VCMPGTUH, pim_group);
+
+ def_pim_builtin ("vec_vcmpgtuw", bool_V4SI_type_node, VCMPGTUW, pim_group);
+
+ def_pim_builtin ("vec_vctsxs", V4SI_type_node, VCTSXS, pim_group);
+
+ def_pim_builtin ("vec_vctuxs", unsigned_V4SI_type_node, VCTUXS, pim_group);
+
+ def_pim_builtin ("vec_vexptefp", V4SF_type_node, VEXPTEFP, pim_group);
+
+ def_pim_builtin ("vec_vlogefp", V4SF_type_node, VLOGEFP, pim_group);
+
+ def_pim_builtin ("vec_vmaddfp", V4SF_type_node, VMADDFP, pim_group);
+
+ def_pim_builtin ("vec_vmaxfp", NULL_TREE, VMAXFP, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmaxsb", NULL_TREE, VMAXSB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmaxsh", NULL_TREE, VMAXSH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmaxsw", NULL_TREE, VMAXSW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmaxub", NULL_TREE, VMAXUB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmaxuh", NULL_TREE, VMAXUH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmaxuw", NULL_TREE, VMAXUW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmhaddshs", V8HI_type_node, VMHADDSHS, pim_group);
+
+ def_pim_builtin ("vec_vmhraddshs", V8HI_type_node, VMHRADDSHS, pim_group);
+
+ def_pim_builtin ("vec_vminfp", NULL_TREE, VMINFP, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vminsb", NULL_TREE, VMINSB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vminsh", NULL_TREE, VMINSH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vminsw", NULL_TREE, VMINSW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vminub", NULL_TREE, VMINUB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vminuh", NULL_TREE, VMINUH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vminuw", NULL_TREE, VMINUW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmladduhm", NULL_TREE, VMLADDUHM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmrghb", NULL_TREE, VMRGHB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmrghh", NULL_TREE, VMRGHH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmrghw", NULL_TREE, VMRGHW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmrglb", NULL_TREE, VMRGLB, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmrglh", NULL_TREE, VMRGLH, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmrglw", NULL_TREE, VMRGLW, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vmsummbm", V4SI_type_node, VMSUMMBM, pim_group);
+
+ def_pim_builtin ("vec_vmsumshm", V4SI_type_node, VMSUMSHM, pim_group);
+
+ def_pim_builtin ("vec_vmsumshs", V4SI_type_node, VMSUMSHS, pim_group);
+
+ def_pim_builtin ("vec_vmsumubm", unsigned_V4SI_type_node, VMSUMUBM, pim_group);
+
+ def_pim_builtin ("vec_vmsumuhm", unsigned_V4SI_type_node, VMSUMUHM, pim_group);
+
+ def_pim_builtin ("vec_vmsumuhs", unsigned_V4SI_type_node, VMSUMUHS, pim_group);
+
+ def_pim_builtin ("vec_vmulesb", V8HI_type_node, VMULESB, pim_group);
+
+ def_pim_builtin ("vec_vmulesh", V4SI_type_node, VMULESH, pim_group);
+
+ def_pim_builtin ("vec_vmuleub", unsigned_V8HI_type_node, VMULEUB, pim_group);
+
+ def_pim_builtin ("vec_vmuleuh", unsigned_V4SI_type_node, VMULEUH, pim_group);
+
+ def_pim_builtin ("vec_vmulosb", V8HI_type_node, VMULOSB, pim_group);
+
+ def_pim_builtin ("vec_vmulosh", V4SI_type_node, VMULOSH, pim_group);
+
+ def_pim_builtin ("vec_vmuloub", unsigned_V8HI_type_node, VMULOUB, pim_group);
+
+ def_pim_builtin ("vec_vmulouh", unsigned_V4SI_type_node, VMULOUH, pim_group);
+
+ def_pim_builtin ("vec_vnmsubfp", V4SF_type_node, VNMSUBFP, pim_group);
+
+ def_pim_builtin ("vec_vnor", NULL_TREE, VNOR, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vor", NULL_TREE, VOR, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vperm", V16QI_type_node, VPERM_4SI, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vpkpx", pixel_V8HI_type_node, VPKPX, pim_group);
+
+ def_pim_builtin ("vec_vpkshss", V16QI_type_node, VPKSHSS, pim_group);
+
+ def_pim_builtin ("vec_vpkshus", unsigned_V16QI_type_node, VPKSHUS, pim_group);
+
+ def_pim_builtin ("vec_vpkswss", V8HI_type_node, VPKSWSS, pim_group);
+
+ def_pim_builtin ("vec_vpkswus", unsigned_V8HI_type_node, VPKSWUS, pim_group);
+
+ def_pim_builtin ("vec_vpkuhum", NULL_TREE, VPKUHUM, pim_rt_1h | pim_group);
+
+ def_pim_builtin ("vec_vpkuhus", unsigned_V16QI_type_node, VPKUHUS, pim_group);
+
+ def_pim_builtin ("vec_vpkuwum", NULL_TREE, VPKUWUM, pim_rt_1h | pim_group);
+
+ def_pim_builtin ("vec_vpkuwus", unsigned_V8HI_type_node, VPKUWUS, pim_group);
+
+ def_pim_builtin ("vec_vrefp", V4SF_type_node, VREFP, pim_group);
+
+ def_pim_builtin ("vec_vrfim", V4SF_type_node, VRFIM, pim_group);
+
+ def_pim_builtin ("vec_vrfin", V4SF_type_node, VRFIN, pim_group);
+
+ def_pim_builtin ("vec_vrfip", V4SF_type_node, VRFIP, pim_group);
+
+ def_pim_builtin ("vec_vrfiz", V4SF_type_node, VRFIZ, pim_group);
+
+ def_pim_builtin ("vec_vrlb", NULL_TREE, VRLB, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vrlh", NULL_TREE, VRLH, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vrlw", NULL_TREE, VRLW, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vrsqrtefp", V4SF_type_node, VRSQRTEFP, pim_group);
+
+ def_pim_builtin ("vec_vsel", NULL_TREE, VSEL_4SI, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsl", NULL_TREE, VSL, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vslb", NULL_TREE, VSLB, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsldoi", NULL_TREE, VSLDOI_4SI, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vslh", NULL_TREE, VSLH, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vslo", NULL_TREE, VSLO, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vslw", NULL_TREE, VSLW, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vspltb", NULL_TREE, VSPLTB, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsplth", NULL_TREE, VSPLTH, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vspltisb", V16QI_type_node, VSPLTISB, pim_group);
+
+ def_pim_builtin ("vec_vspltish", V8HI_type_node, VSPLTISH, pim_group);
+
+ def_pim_builtin ("vec_vspltisw", V4SI_type_node, VSPLTISW, pim_group);
+
+ def_pim_builtin ("vec_vspltw", NULL_TREE, VSPLTW, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsr", NULL_TREE, VSR, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsrab", NULL_TREE, VSRAB, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsrah", NULL_TREE, VSRAH, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsraw", NULL_TREE, VSRAW, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsrb", NULL_TREE, VSRB, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsrh", NULL_TREE, VSRH, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsro", NULL_TREE, VSRO, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsrw", NULL_TREE, VSRW, pim_rt_1 | pim_group);
+
+ def_pim_builtin ("vec_vsubcuw", unsigned_V4SI_type_node, VSUBCUW, pim_group);
+
+ def_pim_builtin ("vec_vsubfp", NULL_TREE, VSUBFP, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubsbs", NULL_TREE, VSUBSBS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubshs", NULL_TREE, VSUBSHS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubsws", NULL_TREE, VSUBSWS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsububm", NULL_TREE, VSUBUBM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsububs", NULL_TREE, VSUBUBS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubuhm", NULL_TREE, VSUBUHM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubuhs", NULL_TREE, VSUBUHS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubuwm", NULL_TREE, VSUBUWM, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsubuws", NULL_TREE, VSUBUWS, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_vsum4sbs", V4SI_type_node, VSUM4SBS, pim_group);
+
+ def_pim_builtin ("vec_vsum4shs", V4SI_type_node, VSUM4SHS, pim_group);
+
+ def_pim_builtin ("vec_vsum4ubs", unsigned_V4SI_type_node, VSUM4UBS, pim_group);
+
+ def_pim_builtin ("vec_vsum2sws", V4SI_type_node, VSUM2SWS, pim_group);
+
+ def_pim_builtin ("vec_vsumsws", V4SI_type_node, VSUMSWS, pim_group);
+
+ def_pim_builtin ("vec_vupkhpx", NULL_TREE, VUPKHPX, pim_rt_1d | pim_group);
+
+ def_pim_builtin ("vec_vupkhsb", NULL_TREE, VUPKHSB, pim_rt_1d | pim_group);
+
+ def_pim_builtin ("vec_vupkhsh", NULL_TREE, VUPKHSH, pim_rt_1d | pim_group);
+
+ def_pim_builtin ("vec_vupklpx", NULL_TREE, VUPKLPX, pim_rt_1d | pim_group);
+
+ def_pim_builtin ("vec_vupklsb", NULL_TREE, VUPKLSB, pim_rt_1d | pim_group);
+
+ def_pim_builtin ("vec_vupklsh", NULL_TREE, VUPKLSH, pim_rt_1d | pim_group);
+
+ def_pim_builtin ("vec_vxor", NULL_TREE, VXOR, pim_rt_12 | pim_group);
+
+ def_pim_builtin ("vec_xor", NULL_TREE, VXOR, pim_rt_12 | pim_group);
+
+ /* PIM Predicates. */
+
+ gcc_assert (pim_code == ALTIVEC_PIM_VEC_ALL_EQ);
+
+ def_pim_builtin ("vec_all_eq", integer_type_node, VCMPEQUB_P, pim_ovl_16 | pim_cr6_lt | pim_group);
+ def_pim_builtin ("vec_all_eq.2", integer_type_node, VCMPEQUH_P, pim_ovl_8 | pim_cr6_lt);
+ def_pim_builtin ("vec_all_eq.3", integer_type_node, VCMPEQFP_P, pim_ovl_4f | pim_cr6_lt);
+ def_pim_builtin ("vec_all_eq.4", integer_type_node, VCMPEQUW_P, pim_ovl_4 | pim_cr6_lt);
+
+ def_pim_builtin ("vec_all_ge", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_manip_swap | pim_cr6_eq | pim_group);
+ def_pim_builtin ("vec_all_ge.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_manip_swap | pim_cr6_eq);
+ def_pim_builtin ("vec_all_ge.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_manip_swap | pim_cr6_eq);
+ def_pim_builtin ("vec_all_ge.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_manip_swap | pim_cr6_eq);
+ def_pim_builtin ("vec_all_ge.5", integer_type_node, VCMPGEFP_P, pim_ovl_4f | pim_cr6_lt);
+ def_pim_builtin ("vec_all_ge.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_manip_swap | pim_cr6_eq);
+ def_pim_builtin ("vec_all_ge.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_manip_swap | pim_cr6_eq);
+
+ def_pim_builtin ("vec_all_gt", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_cr6_lt | pim_group);
+ def_pim_builtin ("vec_all_gt.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_cr6_lt);
+ def_pim_builtin ("vec_all_gt.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_cr6_lt);
+ def_pim_builtin ("vec_all_gt.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_cr6_lt);
+ def_pim_builtin ("vec_all_gt.5", integer_type_node, VCMPGTFP_P, pim_ovl_4f | pim_cr6_lt);
+ def_pim_builtin ("vec_all_gt.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_cr6_lt);
+ def_pim_builtin ("vec_all_gt.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_cr6_lt);
+
+ def_pim_builtin ("vec_all_in", integer_type_node, VCMPBFP_P, pim_cr6_eq | pim_group);
+
+ def_pim_builtin ("vec_all_le", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_cr6_eq | pim_group);
+ def_pim_builtin ("vec_all_le.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_cr6_eq);
+ def_pim_builtin ("vec_all_le.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_cr6_eq);
+ def_pim_builtin ("vec_all_le.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_cr6_eq);
+ def_pim_builtin ("vec_all_le.5", integer_type_node, VCMPGEFP_P, pim_ovl_4f | pim_manip_swap | pim_cr6_lt);
+ def_pim_builtin ("vec_all_le.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_cr6_eq);
+ def_pim_builtin ("vec_all_le.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_cr6_eq);
+
+ def_pim_builtin ("vec_all_lt", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_manip_swap | pim_cr6_lt | pim_group);
+ def_pim_builtin ("vec_all_lt.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_manip_swap | pim_cr6_lt);
+ def_pim_builtin ("vec_all_lt.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_manip_swap | pim_cr6_lt);
+ def_pim_builtin ("vec_all_lt.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_manip_swap | pim_cr6_lt);
+ def_pim_builtin ("vec_all_lt.5", integer_type_node, VCMPGTFP_P, pim_ovl_4f | pim_manip_swap | pim_cr6_lt);
+ def_pim_builtin ("vec_all_lt.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_manip_swap | pim_cr6_lt);
+ def_pim_builtin ("vec_all_lt.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_manip_swap | pim_cr6_lt);
+
+ def_pim_builtin ("vec_all_nan", integer_type_node, VCMPEQFP_P, pim_manip_dup | pim_cr6_eq | pim_group);
+
+ def_pim_builtin ("vec_all_ne", integer_type_node, VCMPEQUB_P, pim_ovl_16 | pim_cr6_eq | pim_group);
+ def_pim_builtin ("vec_all_ne.2", integer_type_node, VCMPEQUH_P, pim_ovl_8 | pim_cr6_eq);
+ def_pim_builtin ("vec_all_ne.3", integer_type_node, VCMPEQFP_P, pim_ovl_4f | pim_cr6_eq);
+ def_pim_builtin ("vec_all_ne.4", integer_type_node, VCMPEQUW_P, pim_ovl_4 | pim_cr6_eq);
+
+ def_pim_builtin ("vec_all_nge", integer_type_node, VCMPGEFP_P, pim_cr6_eq | pim_group);
+
+ def_pim_builtin ("vec_all_ngt", integer_type_node, VCMPGTFP_P, pim_cr6_eq | pim_group);
+
+ def_pim_builtin ("vec_all_nle", integer_type_node, VCMPGEFP_P, pim_manip_swap | pim_cr6_eq | pim_group);
+
+ def_pim_builtin ("vec_all_nlt", integer_type_node, VCMPGEFP_P, pim_manip_swap | pim_cr6_eq | pim_group);
+
+ def_pim_builtin ("vec_all_numeric", integer_type_node, VCMPEQFP_P, pim_manip_dup | pim_cr6_lt | pim_group);
+
+ def_pim_builtin ("vec_any_eq", integer_type_node, VCMPEQUB_P, pim_ovl_16 | pim_cr6_ne | pim_group);
+ def_pim_builtin ("vec_any_eq.2", integer_type_node, VCMPEQUH_P, pim_ovl_8 | pim_cr6_ne);
+ def_pim_builtin ("vec_any_eq.3", integer_type_node, VCMPEQFP_P, pim_ovl_4f | pim_cr6_ne);
+ def_pim_builtin ("vec_any_eq.4", integer_type_node, VCMPEQUW_P, pim_ovl_4 | pim_cr6_ne);
+
+ def_pim_builtin ("vec_any_ge", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_manip_swap | pim_cr6_ge | pim_group);
+ def_pim_builtin ("vec_any_ge.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_manip_swap | pim_cr6_ge);
+ def_pim_builtin ("vec_any_ge.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_manip_swap | pim_cr6_ge);
+ def_pim_builtin ("vec_any_ge.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_manip_swap | pim_cr6_ge);
+ def_pim_builtin ("vec_any_ge.5", integer_type_node, VCMPGEFP_P, pim_ovl_4f | pim_cr6_ne);
+ def_pim_builtin ("vec_any_ge.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_manip_swap | pim_cr6_ge);
+ def_pim_builtin ("vec_any_ge.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_manip_swap | pim_cr6_ge);
+
+ def_pim_builtin ("vec_any_gt", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_cr6_ne | pim_group);
+ def_pim_builtin ("vec_any_gt.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_cr6_ne);
+ def_pim_builtin ("vec_any_gt.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_cr6_ne);
+ def_pim_builtin ("vec_any_gt.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_cr6_ne);
+ def_pim_builtin ("vec_any_gt.5", integer_type_node, VCMPGTFP_P, pim_ovl_4f | pim_cr6_ne);
+ def_pim_builtin ("vec_any_gt.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_cr6_ne);
+ def_pim_builtin ("vec_any_gt.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_cr6_ne);
+
+ def_pim_builtin ("vec_any_le", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_cr6_ge | pim_group);
+ def_pim_builtin ("vec_any_le.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_cr6_ge);
+ def_pim_builtin ("vec_any_le.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_cr6_ge);
+ def_pim_builtin ("vec_any_le.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_cr6_ge);
+ def_pim_builtin ("vec_any_le.5", integer_type_node, VCMPGEFP_P, pim_ovl_4f | pim_manip_swap | pim_cr6_ne);
+ def_pim_builtin ("vec_any_le.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_cr6_ge);
+ def_pim_builtin ("vec_any_le.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_cr6_ge);
+
+ def_pim_builtin ("vec_any_lt", integer_type_node, VCMPGTUB_P, pim_ovl_16u_16u | pim_manip_swap | pim_cr6_ne | pim_group);
+ def_pim_builtin ("vec_any_lt.2", integer_type_node, VCMPGTSB_P, pim_ovl_16 | pim_manip_swap | pim_cr6_ne);
+ def_pim_builtin ("vec_any_lt.3", integer_type_node, VCMPGTUH_P, pim_ovl_8u_8u | pim_manip_swap | pim_cr6_ne);
+ def_pim_builtin ("vec_any_lt.4", integer_type_node, VCMPGTSH_P, pim_ovl_8 | pim_manip_swap | pim_cr6_ne);
+ def_pim_builtin ("vec_any_lt.5", integer_type_node, VCMPGTFP_P, pim_ovl_4f | pim_manip_swap | pim_cr6_ne);
+ def_pim_builtin ("vec_any_lt.6", integer_type_node, VCMPGTUW_P, pim_ovl_4u_4u | pim_manip_swap | pim_cr6_ne);
+ def_pim_builtin ("vec_any_lt.7", integer_type_node, VCMPGTSW_P, pim_ovl_4 | pim_manip_swap | pim_cr6_ne);
+
+ def_pim_builtin ("vec_any_nan", integer_type_node, VCMPEQFP_P, pim_manip_dup | pim_cr6_ge | pim_group);
+
+ def_pim_builtin ("vec_any_ne", integer_type_node, VCMPEQUB_P, pim_ovl_16 | pim_cr6_ge | pim_group);
+ def_pim_builtin ("vec_any_ne.2", integer_type_node, VCMPEQUH_P, pim_ovl_8 | pim_cr6_ge);
+ def_pim_builtin ("vec_any_ne.3", integer_type_node, VCMPEQFP_P, pim_ovl_4f | pim_cr6_ge);
+ def_pim_builtin ("vec_any_ne.4", integer_type_node, VCMPEQUW_P, pim_ovl_4 | pim_cr6_ge);
+
+ def_pim_builtin ("vec_any_nge", integer_type_node, VCMPGEFP_P, pim_cr6_ge | pim_group);
+
+ def_pim_builtin ("vec_any_ngt", integer_type_node, VCMPGTFP_P, pim_cr6_ge | pim_group);
+
+ def_pim_builtin ("vec_any_nle", integer_type_node, VCMPGEFP_P, pim_manip_swap | pim_cr6_ge | pim_group);
+
+ def_pim_builtin ("vec_any_nlt", integer_type_node, VCMPGEFP_P, pim_manip_swap | pim_cr6_ge | pim_group);
+
+ def_pim_builtin ("vec_any_numeric", integer_type_node, VCMPEQFP_P, pim_manip_dup | pim_cr6_ne | pim_group);
+
+ def_pim_builtin ("vec_any_out", integer_type_node, VCMPBFP_P, pim_cr6_ne | pim_group);
+
+ gcc_assert (pim_code == ALTIVEC_PIM__LAST + 1);
+ }
+ /* APPLE LOCAL end AltiVec */
}
static void
@@ -8925,6 +10646,8 @@ expand_block_clear (rtx operands[])
int offset;
int clear_bytes;
int clear_step;
+ /* APPLE LOCAL Altivec 3840704 */
+ bool cpu_altivec = TARGET_ALTIVEC && ! flag_disable_opts_for_faltivec;
/* If this is not a fixed size move, just call memcpy */
if (! constp)
@@ -8940,11 +10663,23 @@ expand_block_clear (rtx operands[])
if (bytes <= 0)
return 1;
+ /* APPLE LOCAL begin Altivec 3840704 */
+ {
+ static bool warned;
+ if (flag_disable_opts_for_faltivec && align >= 128 && ! warned)
+ {
+ warned = true;
+ warning ("vectorised memset disabled due to use of -faltivec without -maltivec");
+ }
+ }
+ /* APPLE LOCAL end Altivec 3840704 */
+
/* Use the builtin memset after a point, to avoid huge code bloat.
When optimize_size, avoid any significant code bloat; calling
memset is about 4 instructions, so allow for one instruction to
load zero and three to do clearing. */
- if (TARGET_ALTIVEC && align >= 128)
+ /* APPLE LOCAL Altivec 3840704 */
+ if (cpu_altivec && align >= 128)
clear_step = 16;
else if (TARGET_POWERPC64 && align >= 32)
clear_step = 8;
@@ -8961,7 +10696,8 @@ expand_block_clear (rtx operands[])
enum machine_mode mode = BLKmode;
rtx dest;
- if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
+ /* APPLE LOCAL Altivec 3840704 */
+ if (bytes >= 16 && cpu_altivec && align >= 128)
{
clear_bytes = 16;
mode = V4SImode;
@@ -9052,9 +10788,24 @@ expand_block_move (rtx operands[])
enum machine_mode mode = BLKmode;
rtx src, dest;
+ /* APPLE LOCAL begin Altivec 3840704 */
+ {
+ static bool warned;
+ if (flag_disable_opts_for_faltivec && bytes >= 16 && align >= 128
+ && ! warned)
+ {
+ warned = true;
+ warning ("vectorised memcpy disabled due to use of -faltivec without -maltivec");
+ }
+ }
+ /* APPLE LOCAL end Altivec 3840704 */
+
/* Altivec first, since it will be faster than a string move
when it applies, and usually not significantly larger. */
- if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
+ /* APPLE LOCAL begin Altivec 3840704 */
+ if (TARGET_ALTIVEC && ! flag_disable_opts_for_faltivec
+ && bytes >= 16 && align >= 128)
+ /* APPLE LOCAL end Altivec 3840704 */
{
move_bytes = 16;
mode = V4SImode;
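/* Editor's sketch, not part of this patch: a fixed-size, 16-byte-aligned
   copy of the kind affected by this hunk.  With -faltivec but without
   -maltivec, the V4SImode path above is now skipped and the one-time
   warning fires instead.  */
struct pair { int x[8]; } __attribute__ ((aligned (16)));

void
example_block_move (struct pair *dst, const struct pair *src)
{
  *dst = *src;   /* 32 bytes at 128-bit alignment.  */
}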
@@ -10315,7 +12066,11 @@ rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
static struct machine_function *
rs6000_init_machine_status (void)
{
- return ggc_alloc_cleared (sizeof (machine_function));
+ /* APPLE LOCAL begin volatile pic base reg in leaves */
+ machine_function *mf = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
+ mf->substitute_pic_base_reg = INVALID_REGNUM;
+ return mf;
+ /* APPLE LOCAL end volatile pic base reg in leaves */
}
/* These macros test for integers and extract the low-order bits. */
@@ -10535,8 +12290,7 @@ print_operand (FILE *file, rtx x, int code)
/* Bit 1 is EQ bit. */
i = 4 * (REGNO (x) - CR0_REGNO) + 2;
- /* If we want bit 31, write a shift count of zero, not 32. */
- fprintf (file, "%d", i == 31 ? 0 : i + 1);
+ fprintf (file, "%d", i);
return;
case 'E':
@@ -11341,7 +13095,7 @@ rs6000_generate_compare (enum rtx_code code)
if ((TARGET_E500 && !TARGET_FPRS && TARGET_HARD_FLOAT)
&& rs6000_compare_fp_p)
{
- rtx cmp, or1, or2, or_result, compare_result2;
+ rtx cmp, or_result, compare_result2;
enum machine_mode op_mode = GET_MODE (rs6000_compare_op0);
if (op_mode == VOIDmode)
@@ -11415,9 +13169,6 @@ rs6000_generate_compare (enum rtx_code code)
default: abort ();
}
- or1 = gen_reg_rtx (SImode);
- or2 = gen_reg_rtx (SImode);
- or_result = gen_reg_rtx (CCEQmode);
compare_result2 = gen_reg_rtx (CCFPmode);
/* Do the EQ. */
@@ -11436,14 +13187,10 @@ rs6000_generate_compare (enum rtx_code code)
else abort ();
emit_insn (cmp);
- or1 = gen_rtx_GT (SImode, compare_result, const0_rtx);
- or2 = gen_rtx_GT (SImode, compare_result2, const0_rtx);
-
/* OR them together. */
- cmp = gen_rtx_SET (VOIDmode, or_result,
- gen_rtx_COMPARE (CCEQmode,
- gen_rtx_IOR (SImode, or1, or2),
- const_true_rtx));
+ or_result = gen_reg_rtx (CCFPmode);
+ cmp = gen_e500_cr_ior_compare (or_result, compare_result,
+ compare_result2);
compare_result = or_result;
code = EQ;
}
@@ -11553,9 +13300,9 @@ rs6000_emit_sCOND (enum rtx_code code, rtx result)
abort ();
if (cond_code == NE)
- emit_insn (gen_e500_flip_eq_bit (t, t));
+ emit_insn (gen_e500_flip_gt_bit (t, t));
- emit_insn (gen_move_from_CR_eq_bit (result, t));
+ emit_insn (gen_move_from_CR_gt_bit (result, t));
return;
}
@@ -11736,9 +13483,9 @@ output_cbranch (rtx op, const char *label, int reversed, rtx insn)
return string;
}
-/* Return the string to flip the EQ bit on a CR. */
+/* Return the string to flip the GT bit on a CR. */
char *
-output_e500_flip_eq_bit (rtx dst, rtx src)
+output_e500_flip_gt_bit (rtx dst, rtx src)
{
static char string[64];
int a, b;
@@ -11747,9 +13494,9 @@ output_e500_flip_eq_bit (rtx dst, rtx src)
|| GET_CODE (src) != REG || ! CR_REGNO_P (REGNO (src)))
abort ();
- /* EQ bit. */
- a = 4 * (REGNO (dst) - CR0_REGNO) + 2;
- b = 4 * (REGNO (src) - CR0_REGNO) + 2;
+ /* GT bit. */
+ a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
+ b = 4 * (REGNO (src) - CR0_REGNO) + 1;
sprintf (string, "crnot %d,%d", a, b);
return string;
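/* Editor's note, not part of this patch: the bit arithmetic behind the
   change from "+ 2" to "+ 1" above.  Within each 4-bit condition
   register field the offsets are LT = 0, GT = 1, EQ = 2, SO = 3.  */
static int
example_cr_gt_bit (int cr_field)
{
  return 4 * cr_field + 1;   /* Bit number of the GT bit in CRn.  */
}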
@@ -11988,10 +13735,14 @@ rs6000_emit_vector_select (rtx dest, rtx op1, rtx op2, rtx mask)
temp = gen_reg_rtx (dest_mode);
+ /* APPLE LOCAL begin AV */
+ /* Fix op1 and op2 order. */
t = gen_rtx_fmt_ee (SET, VOIDmode, temp,
gen_rtx_fmt_Ei (UNSPEC, dest_mode,
- gen_rtvec (3, op1, op2, mask),
+ gen_rtvec (3, op2, op1, mask),
vsel_insn_index));
+ /* APPLE LOCAL end AV */
+
emit_insn (t);
emit_move_insn (dest, temp);
return;
@@ -12266,6 +14017,10 @@ rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
else
target = emit_conditional_move (dest, c, op0, op1, mode,
op1, op0, mode, 0);
+ /* APPLE LOCAL begin pragma fenv 3943021 */
+ if (flag_trapping_math && target == NULL_RTX)
+ return;
+ /* APPLE LOCAL end pragma fenv 3943021 */
if (target == NULL_RTX)
abort ();
if (target != dest)
@@ -12444,6 +14199,8 @@ first_reg_to_save (void)
#if TARGET_MACHO
if (flag_pic
&& current_function_uses_pic_offset_table
+ /* APPLE LOCAL volatile pic base reg in leaves */
+ && cfun->machine->substitute_pic_base_reg == INVALID_REGNUM
&& first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif
@@ -12755,9 +14512,8 @@ rs6000_stack_info (void)
&& !FP_SAVE_INLINE (info_ptr->first_fp_reg_save))
|| info_ptr->first_altivec_reg_save <= LAST_ALTIVEC_REGNO
|| (DEFAULT_ABI == ABI_V4 && current_function_calls_alloca)
- || (DEFAULT_ABI == ABI_DARWIN
- && flag_pic
- && current_function_uses_pic_offset_table)
+ /* APPLE LOCAL but why? */
+ /* Test for flag_pic, abi, current_function deleted deliberately. */
|| info_ptr->calls_p)
{
info_ptr->lr_save_p = 1;
@@ -12916,7 +14672,8 @@ rs6000_stack_info (void)
+ ehrd_size
+ info_ptr->cr_size
+ info_ptr->lr_size
- + info_ptr->vrsave_size
+ /* APPLE LOCAL fix redundant add? */
+ /* FIXME: the FSF does "+ info_ptr->vrsave_size" here, shouldn't we? */
+ info_ptr->toc_size,
save_align);
@@ -13203,16 +14960,33 @@ rs6000_return_addr (int count, rtx frame)
}
/* Say whether a function is a candidate for sibcall handling or not.
- We do not allow indirect calls to be optimized into sibling calls.
+ APPLE LOCAL sibling calls
+
Also, we can't do it if there are any vector parameters; there's
nowhere to put the VRsave code so it works; note that functions with
vector parameters are required to have a prototype, so the argument
type info must be available here. (The tail recursion case can work
with vector parameters, but there's no way to distinguish here.) */
+
+/* APPLE LOCAL begin sibling calls
+ On Darwin only, indirect calls may be sibcalls. This is enabled
+ primarily by target-specific logic in calls.c.
+ APPLE LOCAL end sibling calls */
static bool
rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
tree type;
+ /* APPLE LOCAL begin long-branch */
+ if (TARGET_LONG_BRANCH)
+ return 0;
+ /* APPLE LOCAL end long-branch */
+
+ /* APPLE LOCAL begin indirect sibcalls */
+ /* This goes with a lot of local changes in expand_call. */
+ if (DEFAULT_ABI == ABI_DARWIN && !decl)
+ return true;
+ /* APPLE LOCAL end indirect sibcalls */
+
if (decl)
{
if (TARGET_ALTIVEC_VRSAVE)
@@ -13325,11 +15099,10 @@ rs6000_emit_load_toc_table (int fromprolog)
rtx temp0 = (fromprolog
? gen_rtx_REG (Pmode, 0)
: gen_reg_rtx (Pmode));
- rtx symF;
if (fromprolog)
{
- rtx symL;
+ rtx symF, symL;
ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
@@ -13347,14 +15120,9 @@ rs6000_emit_load_toc_table (int fromprolog)
else
{
rtx tocsym;
- static int reload_toc_labelno = 0;
tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
-
- ASM_GENERATE_INTERNAL_LABEL (buf, "LCG", reload_toc_labelno++);
- symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
-
- emit_insn (gen_load_toc_v4_PIC_1b (tempLR, symF, tocsym));
+ emit_insn (gen_load_toc_v4_PIC_1b (tempLR, tocsym));
emit_move_insn (dest, tempLR);
emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
}
@@ -13827,6 +15595,131 @@ generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
return insn;
}
+/* APPLE LOCAL begin special ObjC method use of R12 */
+static int objc_method_using_pic = 0;
+
+/* Determine whether a name is an ObjC method. */
+static int name_encodes_objc_method_p (const char *piclabel_name)
+{
+ return (piclabel_name[0] == '*' && piclabel_name[1] == '"'
+ ? (piclabel_name[2] == 'L'
+ && (piclabel_name[3] == '+' || piclabel_name[3] == '-'))
+ : (piclabel_name[1] == 'L'
+ && (piclabel_name[2] == '+' || piclabel_name[2] == '-')));
+}
+/* APPLE LOCAL end special ObjC method use of R12 */
+
+/* APPLE LOCAL begin recompute PIC register use */
+/* Sometimes a function has references that require the PIC register,
+ but optimization removes them all. To catch this case,
+ recompute current_function_uses_pic_offset_table here.
+ This may allow us to eliminate the prologue and epilogue. */
+
+static int
+recompute_PIC_register_use (void)
+{
+ if (DEFAULT_ABI == ABI_DARWIN
+ && flag_pic && current_function_uses_pic_offset_table
+ && !cfun->machine->ra_needs_full_frame)
+ {
+ rtx insn;
+ current_function_uses_pic_offset_table = 0;
+ push_topmost_sequence ();
+ for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
+ if (reg_mentioned_p (pic_offset_table_rtx, insn))
+ {
+ current_function_uses_pic_offset_table = 1;
+ break;
+ }
+ pop_topmost_sequence ();
+ }
+ return 0;
+}
+/* APPLE LOCAL end recompute PIC register use */
+
+/* APPLE LOCAL begin volatile pic base reg in leaves */
+/* If this is a leaf function and we used any pic-based references,
+ see if there is an unused volatile reg we can use instead of R31.
+ If so, set substitute_pic_base_reg to this reg, set its
+ regs_ever_live bit (to avoid confusing later calls to
+ alloc_volatile_reg), and
+ make a pass through the existing RTL, substituting the new reg for
+ the old one wherever it appears.
+ Logically this is a void function; it returns int only so that it
+ can be used to initialize a dummy variable, thus getting executed
+ ahead of the other initializations. */
+
+/* ALLOC_VOLATILE_REG allocates a volatile register AFTER all gcc
+ register allocations have been done; we use it to reserve an
+ unused reg for holding VRsave. Returns -1 in case of failure (all
+ volatile regs are in use). */
+/* Note, this is called from both the prologue and epilogue code,
+ with the assumption that it will return the same result both
+ times! Since the register arrays are not changed in between
+ this is valid, if a bit fragile. */
+/* In future we may also use this to grab an unused volatile reg to
+ hold the PIC base reg in the event that the current function makes
+ no procedure calls; this was done in 2.95. */
+static int
+alloc_volatile_reg (void)
+{
+ if (current_function_is_leaf
+ && reload_completed
+ && !cfun->machine->ra_needs_full_frame)
+ {
+ int r;
+ for (r = 10; r >= 2; --r)
+ if (! fixed_regs[r] && ! regs_ever_live[r])
+ return r;
+ }
+
+ return -1; /* fail */
+}
+
+static int
+try_leaf_pic_optimization (void)
+{
+ if (DEFAULT_ABI == ABI_DARWIN
+ && flag_pic && current_function_uses_pic_offset_table
+ && current_function_is_leaf
+ && !cfun->machine->ra_needs_full_frame)
+ {
+ int reg = alloc_volatile_reg ();
+ if (reg != -1)
+ {
+ /* Run through the insns, changing references to the original
+ PIC_OFFSET_TABLE_REGNUM to our new one. */
+ rtx insn;
+ const int nregs = PIC_OFFSET_TABLE_REGNUM + 1;
+ rtx *reg_map = (rtx *) xmalloc (nregs * sizeof (rtx));
+ memset (reg_map, 0, nregs * sizeof (rtx));
+ reg_map[PIC_OFFSET_TABLE_REGNUM] = gen_rtx_REG (SImode, reg);
+
+ push_topmost_sequence ();
+ for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ {
+ replace_regs (PATTERN (insn), reg_map, nregs, 1);
+ replace_regs (REG_NOTES (insn), reg_map, nregs, 1);
+ }
+ else if (GET_CODE (insn) == CALL_INSN)
+ {
+ if (!SIBLING_CALL_P (insn))
+ abort ();
+ }
+ }
+ pop_topmost_sequence ();
+ free (reg_map);
+
+ regs_ever_live[reg] = 1;
+ regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 0;
+ cfun->machine->substitute_pic_base_reg = reg;
+ }
+ }
+ return 0;
+}
+/* APPLE LOCAL end volatile pic base reg in leaves */
+
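+/* Net effect (informal): a leaf that needs the pic base only briefly
+   can keep it in a volatile reg such as r10, so R31 never goes live
+   and the prologue/epilogue need not save and restore it.  */
+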
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
@@ -13902,6 +15795,10 @@ gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
void
rs6000_emit_prologue (void)
{
+ /* APPLE LOCAL recompute PIC register use */
+ int dummy ATTRIBUTE_UNUSED = recompute_PIC_register_use ();
+ /* APPLE LOCAL volatile pic base reg in leaves */
+ int ignored ATTRIBUTE_UNUSED = try_leaf_pic_optimization ();
rs6000_stack_t *info = rs6000_stack_info ();
enum machine_mode reg_mode = Pmode;
int reg_size = TARGET_32BIT ? 4 : 8;
@@ -13913,11 +15810,20 @@ rs6000_emit_prologue (void)
int saving_FPRs_inline;
int using_store_multiple;
HOST_WIDE_INT sp_offset = 0;
+ /* APPLE LOCAL begin callers_lr_already_saved */
+ int callers_lr_already_saved = 0;
+#if TARGET_MACHO
+ int lr_already_set_up_for_pic = 0;
+#endif
+ /* APPLE LOCAL end callers_lr_already_saved */
+ /* APPLE LOCAL special ObjC method use of R12 */
+ objc_method_using_pic = 0;
if (TARGET_FIX_AND_CONTINUE)
{
+ /* APPLE LOCAL begin mainline 5 nops */
/* gdb on darwin arranges to forward a function from the old
- address by modifying the first 4 instructions of the function
+ address by modifying the first 5 instructions of the function
to branch to the overriding function. This is necessary to
permit function pointers that point to the old function to
actually forward to the new function. */
@@ -13925,6 +15831,8 @@ rs6000_emit_prologue (void)
emit_insn (gen_nop ());
emit_insn (gen_nop ());
emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ /* APPLE LOCAL end mainline 5 nops */
}
if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
@@ -13962,6 +15870,31 @@ rs6000_emit_prologue (void)
rs6000_emit_stack_tie ();
}
+ /* APPLE LOCAL begin special ObjC method use of R12 */
+#if TARGET_MACHO
+ if (DEFAULT_ABI == ABI_DARWIN
+ && current_function_uses_pic_offset_table && flag_pic)
+ {
+ const char *piclabel_name = machopic_function_base_name ();
+
+ if (name_encodes_objc_method_p (piclabel_name)
+ /* If we're saving vector or FP regs via a function call,
+ then don't bother with this ObjC R12 optimization.
+ This test also eliminates world_save. */
+ && (info->first_altivec_reg_save > LAST_ALTIVEC_REGNO
+ || VECTOR_SAVE_INLINE (info->first_altivec_reg_save))
+ && (info->first_fp_reg_save == 64
+ || FP_SAVE_INLINE (info->first_fp_reg_save)))
+ {
+ /* We cannot output the label now; there seems to be no
+ way to prevent cfgcleanup from deleting it. It is done
+ in rs6000_output_function_prologue with fprintf! */
+ objc_method_using_pic = 1;
+ }
+ }
+#endif /* TARGET_MACHO */
+ /* APPLE LOCAL end special ObjC method use of R12 */
+
/* Handle world saves specially here. */
if (WORLD_SAVE_P (info))
{
@@ -14166,7 +16099,12 @@ rs6000_emit_prologue (void)
{
rtx set;
- cr_save_rtx = gen_rtx_REG (SImode, 12);
+ /* APPLE LOCAL begin special ObjC method use of R12 */
+ /* For Darwin, use R2, so we don't clobber the special ObjC
+ method use of R12. R11 has a special meaning for Ada, so we
+ can't use that. */
+ cr_save_rtx = gen_rtx_REG (SImode, DEFAULT_ABI == ABI_DARWIN ? 2 : 12);
+ /* APPLE LOCAL end special ObjC method use of R12 */
insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
RTX_FRAME_RELATED_P (insn) = 1;
/* Now, there's no way that dwarf2out_frame_debug_expr is going
@@ -14201,17 +16139,62 @@ rs6000_emit_prologue (void)
char rname[30];
const char *alloc_rname;
rtvec p;
- p = rtvec_alloc (2 + 64 - info->first_fp_reg_save);
+ /* APPLE LOCAL begin reduce code size */
+
+ int gen_following_label = 0;
+ int count = 0;
+
+ if (current_function_uses_pic_offset_table && flag_pic
+#ifdef INSN_SCHEDULING
+ /* Prevent the compiler from crashing
+ while scheduling insns after global_alloc! */
+ && (optimize == 0 || !flag_schedule_insns_after_reload)
+#endif
+ /* If this is the last CALL in the prologue, then we've got our PC.
+ If we're saving AltiVec regs via a function call, we're not last. */
+ && (info->first_altivec_reg_save > LAST_ALTIVEC_REGNO
+ || VECTOR_SAVE_INLINE (info->first_altivec_reg_save)))
+ gen_following_label = lr_already_set_up_for_pic = 1;
+ /* APPLE LOCAL end reduce code size */
+
+ /* APPLE LOCAL begin +2 (could be conditionalized) */
+ p = rtvec_alloc (2 + 64 - info->first_fp_reg_save + 2
+ + gen_following_label);
+ /* APPLE LOCAL end +2 (could be conditionalized) */
+
+ /* APPLE LOCAL begin reduce code size */
+ /* 0 -> count++ */
+ RTVEC_ELT (p, count++) = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode,
+ LINK_REGISTER_REGNUM));
+#if TARGET_MACHO
+ /* We have to calculate the offset into saveFP to where we must
+ call (!!).  SAVEFP also saves the caller's LR -- placed into
+ R0 above -- into 8(R1). SAVEFP/RESTOREFP should never be
+ called to save or restore only F31. */
- RTVEC_ELT (p, 0) = gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_REG (Pmode,
- LINK_REGISTER_REGNUM));
+ if (info->lr_save_offset != (POINTER_SIZE / 4) || info->first_fp_reg_save == 63)
+ abort ();
+
+ sprintf (rname, "*saveFP%s%.0d ; save f%d-f31",
+ (info->first_fp_reg_save - 32 == 14 ? "" : "+"),
+ (info->first_fp_reg_save - 46) * 4,
+ info->first_fp_reg_save - 32);
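+ /* Worked example (illustrative): saveFP stores each of f14..f31 with
+ one 4-byte instruction, so entering (50 - 46) * 4 == 16 bytes past
+ the f14 entry point skips f14..f17; first_fp_reg_save == 50 thus
+ prints "*saveFP+16 ; save f18-f31", while for 46 (f14) the "%s%.0d"
+ pair prints nothing and the name is plain "*saveFP". */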
+#else
+ /* APPLE LOCAL end reduce code size */
sprintf (rname, "%s%d%s", SAVE_FP_PREFIX,
info->first_fp_reg_save - 32, SAVE_FP_SUFFIX);
+ /* APPLE LOCAL reduce code size */
+#endif /* TARGET_MACHO */
alloc_rname = ggc_strdup (rname);
- RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode,
+ /* APPLE LOCAL reduce code size */
+ RTVEC_ELT (p, count++) = gen_rtx_USE (VOIDmode,
gen_rtx_SYMBOL_REF (Pmode,
alloc_rname));
+ /* APPLE LOCAL begin reduce code size */
+ if (gen_following_label)
+ RTVEC_ELT (p, count++) = gen_rtx_USE (VOIDmode, const0_rtx);
+ /* APPLE LOCAL end reduce code size */
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
{
rtx addr, reg, mem;
@@ -14222,11 +16205,32 @@ rs6000_emit_prologue (void)
mem = gen_rtx_MEM (DFmode, addr);
set_mem_alias_set (mem, rs6000_sr_alias_set);
- RTVEC_ELT (p, i + 2) = gen_rtx_SET (VOIDmode, mem, reg);
+ /* APPLE LOCAL reduce code size */
+ RTVEC_ELT (p, count++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ /* APPLE LOCAL begin C++ EH and setjmp (radar 2866661) */
+#if TARGET_MACHO
+ /* Darwin version of these functions stores R0. */
+ RTVEC_ELT (p, count++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
+
+ /* If we saved LR, *tell* people about it! */
+ if (info->lr_save_p)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->lr_save_offset + sp_offset));
+ rtx mem = gen_rtx_MEM (Pmode, addr);
+ /* This should not be of rs6000_sr_alias_set, because of
+ __builtin_return_address. */
+ RTVEC_ELT (p, count++) = gen_rtx_SET (Pmode, mem,
+ gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
}
+#endif
+ /* APPLE LOCAL end C++ EH and setjmp (radar 2866661) */
insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
+ /* APPLE LOCAL callers_lr_already_saved */
+ callers_lr_already_saved = 1;
}
/* Save GPRs. This is done as a PARALLEL if we are using
@@ -14264,7 +16268,13 @@ rs6000_emit_prologue (void)
&& TARGET_TOC && TARGET_MINIMAL_TOC)))
|| (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
- || (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
+ /* APPLE LOCAL begin volatile pic base reg in leaves */
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic
+ && ((current_function_uses_pic_offset_table
+ && cfun->machine->substitute_pic_base_reg
+ == INVALID_REGNUM)
+ || cfun->machine->ra_needs_full_frame)))))
+ /* APPLE LOCAL end volatile pic base reg in leaves */
{
rtx addr, reg, mem;
reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
@@ -14346,8 +16356,20 @@ rs6000_emit_prologue (void)
}
}
+ /* APPLE LOCAL begin special ObjC method use of R12 */
+ if (objc_method_using_pic)
+ rs6000_maybe_dead (
+ emit_move_insn (gen_rtx_REG (Pmode,
+ cfun->machine->substitute_pic_base_reg
+ == INVALID_REGNUM
+ ? PIC_OFFSET_TABLE_REGNUM
+ : cfun->machine->substitute_pic_base_reg),
+ gen_rtx_REG (Pmode, 12)));
+ /* APPLE LOCAL end special ObjC method use of R12 */
+
/* Save lr if we used it. */
- if (!WORLD_SAVE_P (info) && info->lr_save_p)
+ /* APPLE LOCAL callers_lr_already_saved */
+ if (!WORLD_SAVE_P (info) && info->lr_save_p && !callers_lr_already_saved)
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->lr_save_offset + sp_offset));
@@ -14441,17 +16463,38 @@ rs6000_emit_prologue (void)
#if TARGET_MACHO
if (DEFAULT_ABI == ABI_DARWIN
+ /* APPLE LOCAL special ObjC method use of R12 */
+ && !objc_method_using_pic
&& flag_pic && current_function_uses_pic_offset_table)
{
rtx lr = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM);
rtx src = machopic_function_base_sym ();
- rs6000_maybe_dead (emit_insn (gen_load_macho_picbase (lr, src)));
+ /* APPLE LOCAL begin save and restore LR */
+ /* Save and restore LR locally around this call (in R0). */
+ if (!info->lr_save_p)
+ rs6000_maybe_dead (emit_move_insn (gen_rtx_REG (Pmode, 0), lr));
+ /* APPLE LOCAL end save and restore LR */
+ /* APPLE LOCAL begin performance enhancement */
+ if (!lr_already_set_up_for_pic)
+ rs6000_maybe_dead (emit_insn ((TARGET_64BIT
+ ? gen_load_macho_picbase_di (lr, src)
+ : gen_load_macho_picbase (lr, src))));
+ /* APPLE LOCAL end performance enhancement */
+
+ /* APPLE LOCAL begin volatile pic base reg in leaves */
insn = emit_move_insn (gen_rtx_REG (Pmode,
- RS6000_PIC_OFFSET_TABLE_REGNUM),
+ (cfun->machine->substitute_pic_base_reg
+ == INVALID_REGNUM)
+ ? RS6000_PIC_OFFSET_TABLE_REGNUM
+ : cfun->machine->substitute_pic_base_reg),
lr);
rs6000_maybe_dead (insn);
+
+ if (!info->lr_save_p)
+ rs6000_maybe_dead (emit_move_insn (lr, gen_rtx_REG (Pmode, 0)));
+ /* APPLE LOCAL end volatile pic base reg in leaves */
}
#endif
}
@@ -14467,6 +16510,8 @@ rs6000_output_function_prologue (FILE *file,
if (TARGET_DEBUG_STACK)
debug_stack_info (info);
+ /* APPLE LOCAL do not extern fp save/restore */
+#if !TARGET_MACHO
/* Write .extern for any function we will call to save and restore
fp values. */
if (info->first_fp_reg_save < 64
@@ -14475,6 +16520,8 @@ rs6000_output_function_prologue (FILE *file,
SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX,
RESTORE_FP_PREFIX, info->first_fp_reg_save - 32,
RESTORE_FP_SUFFIX);
+ /* APPLE LOCAL do not extern fp save/restore */
+#endif /* !TARGET_MACHO */
/* Write .extern for AIX common mode routines, if needed. */
if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
@@ -14488,6 +16535,17 @@ rs6000_output_function_prologue (FILE *file,
common_mode_defined = 1;
}
+ /* APPLE LOCAL begin special ObjC method use of R12 */
+#if TARGET_MACHO
+ if (HAVE_prologue && DEFAULT_ABI == ABI_DARWIN && objc_method_using_pic)
+ {
+ /* APPLE FIXME isn't there an asm macro to do all this? */
+ const char *piclabel = machopic_function_base_name ();
+ fprintf (file, "%s:\n", (*piclabel == '*') ? piclabel + 1 : piclabel);
+ }
+#endif
+ /* APPLE LOCAL end special ObjC method use of R12 */
+
if (! HAVE_prologue)
{
start_sequence ();
@@ -14562,6 +16620,8 @@ rs6000_emit_epilogue (int sibcall)
using_mfcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
|| rs6000_cpu == PROCESSOR_PPC603
|| rs6000_cpu == PROCESSOR_PPC750
+ /* APPLE LOCAL ? */
+ || rs6000_cpu == PROCESSOR_PPC7400
|| optimize_size);
if (WORLD_SAVE_P (info))
@@ -14749,7 +16809,10 @@ rs6000_emit_epilogue (int sibcall)
set_mem_alias_set (mem, rs6000_sr_alias_set);
- emit_move_insn (gen_rtx_REG (SImode, 12), mem);
+ /* APPLE LOCAL begin use R11 because of ObjC use of R12 in sibcall to CTR */
+ emit_move_insn (gen_rtx_REG (SImode,
+ DEFAULT_ABI == ABI_DARWIN ? 11 : 12), mem);
+ /* APPLE LOCAL end use R11 because of ObjC use of R12 in sibcall to CTR */
}
/* Set LR here to try to overlap restores below. */
@@ -14821,7 +16884,14 @@ rs6000_emit_epilogue (int sibcall)
&& TARGET_TOC && TARGET_MINIMAL_TOC)))
|| (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
- || (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
+ /* APPLE LOCAL begin darwin native */
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic
+ && ((current_function_uses_pic_offset_table
+ && cfun->machine->substitute_pic_base_reg
+ == INVALID_REGNUM)
+ || cfun->machine->ra_needs_full_frame)))))
+ /* APPLE LOCAL end darwin native */
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->gp_save_offset
@@ -14875,7 +16945,9 @@ rs6000_emit_epilogue (int sibcall)
/* If we saved cr, restore it here. Just those that were used. */
if (info->cr_save_p)
{
- rtx r12_rtx = gen_rtx_REG (SImode, 12);
+ /* APPLE LOCAL use R11 because of ObjC use of R12 in sibcall to CTR */
+ /* APPLE LOCAL silly name retained to minimize deviation from FSF */
+ rtx r12_rtx = gen_rtx_REG (SImode, DEFAULT_ABI == ABI_DARWIN ? 11 : 12);
int count = 0;
if (using_mfcr_multiple)
@@ -14975,8 +17047,25 @@ rs6000_emit_epilogue (int sibcall)
char rname[30];
const char *alloc_rname;
+ /* APPLE LOCAL begin Reduce code size / improve performance */
+#if TARGET_MACHO
+ /* We have to calculate the offset into RESTFP to where we must
+ call (!!).  RESTFP also restores the caller's LR from 8(R1).
+ RESTFP should *never* be called to restore only F31. */
+
+ if (info->lr_save_offset != (POINTER_SIZE / 4) || info->first_fp_reg_save == 63)
+ abort ();
+
+ sprintf (rname, "*restFP%s%.0d ; restore f%d-f31",
+ (info->first_fp_reg_save - 32 == 14 ? "" : "+"),
+ (info->first_fp_reg_save - 46) * 4,
+ info->first_fp_reg_save - 32);
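+ /* As with saveFP in the prologue (illustrative): first_fp_reg_save
+ == 50 yields "*restFP+16 ; restore f18-f31", entering 16 bytes
+ past the f14 entry point. */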
+#else
+ /* APPLE LOCAL end Reduce code size / improve performance */
sprintf (rname, "%s%d%s", RESTORE_FP_PREFIX,
info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);
+ /* APPLE LOCAL Reduce code size / improve performance */
+#endif /* TARGET_MACHO */
alloc_rname = ggc_strdup (rname);
RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode,
gen_rtx_SYMBOL_REF (Pmode,
@@ -16585,8 +18674,22 @@ rs6000_is_costly_dependence (rtx insn, rtx next, rtx link, int cost,
&& (!link || (int) REG_NOTE_KIND (link) == 0))
/* Prevent load after store in the same group if it is a true
dependence. */
+ /* APPLE LOCAL begin nop on true-dependence. */
+ {
+ if (GET_CODE (PATTERN (next)) == SET && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx load_mem = SET_SRC (PATTERN (next));
+ rtx sto_mem = SET_DEST (PATTERN (insn));
+ if (GET_CODE (load_mem) == MEM && GET_CODE (sto_mem) == MEM)
+ /* Only consider those true-dependence cases where the memory conflict
+ can be determined. Exclude cases where true dependence was assumed
+ only because the conflict could not be ruled out from aliasing info. */
+ return must_true_dependence (load_mem, sto_mem);
+ }
return true;
-
+ }
+ /* APPLE LOCAL end nop on true-dependence. */
+
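+ /* Illustrative distinction (hypothetical memory refs):
+ *p = x; y = *p; -- same provable address: must_true_dependence
+ holds, so the pair stays costly (separate dispatch groups);
+ *p = x; y = *q; -- p and q merely *might* alias; the dependence
+ existed only for safety, so the pair may share a group. */
+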
/* The flag is set to X; dependences with latency >= X are considered costly,
and will not be scheduled in the same group. */
if (rs6000_sched_costly_dep <= max_dep_latency
@@ -17166,6 +19269,13 @@ rs6000_handle_altivec_attribute (tree *node,
switch (altivec_type)
{
+ /* APPLE LOCAL begin AltiVec */
+ case 'e':
+ /* Return the constituent element type. */
+ result = (ALTIVEC_VECTOR_MODE (mode) ? TREE_TYPE (type) : type);
+ break;
+ /* APPLE LOCAL end AltiVec */
+
case 'v':
unsigned_p = TYPE_UNSIGNED (type);
switch (mode)
@@ -17206,8 +19316,12 @@ rs6000_handle_altivec_attribute (tree *node,
default: break;
}
- if (result && result != type && TYPE_READONLY (type))
- result = build_qualified_type (result, TYPE_QUAL_CONST);
+ /* APPLE LOCAL begin AltiVec */
+ /* Propagate qualifiers attached to the element type
+ onto the vector type. */
+ if (result && result != type && TYPE_QUALS (type))
+ result = build_qualified_type (result, TYPE_QUALS (type));
+ /* APPLE LOCAL end AltiVec */
*no_add_attrs = true; /* No need to hang on to the attribute. */
@@ -17516,9 +19630,11 @@ macho_branch_islands (void)
strcat (tmp_buf, label);
strcat (tmp_buf, "_pic\n");
strcat (tmp_buf, label);
- strcat (tmp_buf, "_pic:\n\tmflr r11\n");
+ /* APPLE LOCAL indirect calls in R12 */
+ strcat (tmp_buf, "_pic:\n\tmflr r12\n");
- strcat (tmp_buf, "\taddis r11,r11,ha16(");
+ /* APPLE LOCAL indirect calls in R12 */
+ strcat (tmp_buf, "\taddis r12,r12,ha16(");
strcat (tmp_buf, name_buf);
strcat (tmp_buf, " - ");
strcat (tmp_buf, label);
@@ -17526,7 +19642,8 @@ macho_branch_islands (void)
strcat (tmp_buf, "\tmtlr r0\n");
- strcat (tmp_buf, "\taddi r12,r11,lo16(");
+ /* APPLE LOCAL indirect calls in R12 */
+ strcat (tmp_buf, "\taddi r12,r12,lo16(");
strcat (tmp_buf, name_buf);
strcat (tmp_buf, " - ");
strcat (tmp_buf, label);
@@ -17592,12 +19709,59 @@ output_call (rtx insn, rtx *operands, int dest_operand_number,
int cookie_operand_number)
{
static char buf[256];
+ /* APPLE LOCAL begin long-branch */
+ const char *far_call_instr_str = NULL, *near_call_instr_str = NULL;
+ rtx pattern;
+
+ switch (GET_CODE (insn))
+ {
+ case CALL_INSN:
+ far_call_instr_str = "jbsr";
+ near_call_instr_str = "bl";
+ pattern = NULL_RTX;
+ break;
+ case JUMP_INSN:
+ far_call_instr_str = "jmp";
+ near_call_instr_str = "b";
+ pattern = NULL_RTX;
+ break;
+ case INSN:
+ pattern = PATTERN (insn);
+ break;
+ default:
+ gcc_unreachable ();
+ break;
+ }
+ /* APPLE LOCAL end long-branch */
+
if (GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
&& (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
{
tree labelname;
tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
+ /* APPLE LOCAL begin long-branch */
+ /* A PARALLEL pattern means this insn came from a prologue or
+ epilogue; pick the call strings from its first element. */
+ if ((pattern != NULL_RTX) && GET_CODE (pattern) == PARALLEL)
+ {
+ rtx parallel_first_op = XVECEXP (pattern, 0, 0);
+ switch (GET_CODE (parallel_first_op))
+ {
+ case CLOBBER: /* Prologue: a call to save_world. */
+ far_call_instr_str = "jbsr";
+ near_call_instr_str = "bl";
+ break;
+ case RETURN: /* Epilogue: a call to rest_world. */
+ far_call_instr_str = "jmp";
+ near_call_instr_str = "b";
+ break;
+ default:
+ abort();
+ break;
+ }
+ }
+ /* APPLE LOCAL end long-branch */
+
if (no_previous_def (funname))
{
int line_number = 0;
@@ -17607,7 +19771,13 @@ output_call (rtx insn, rtx *operands, int dest_operand_number,
CODE_LABEL_NUMBER (label_rtx));
label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
labelname = get_identifier (label_buf);
- for (; insn && GET_CODE (insn) != NOTE; insn = PREV_INSN (insn));
+ /* APPLE LOCAL begin 3910248, 3915171 */
+ for (;
+ insn && (GET_CODE (insn) != NOTE
+ || NOTE_LINE_NUMBER (insn) < 0);
+ insn = PREV_INSN (insn))
+ ;
+ /* APPLE LOCAL end 3910248, 3915171 */
if (insn)
line_number = NOTE_LINE_NUMBER (insn);
add_compiler_branch_island (labelname, funname, line_number);
@@ -17820,6 +19990,130 @@ rs6000_darwin_file_start (void)
#endif /* TARGET_MACHO */
+/* APPLE LOCAL begin Macintosh alignment 2002-1-22 --ff */
+/* Return the alignment of a struct based on the Macintosh PowerPC
+ alignment rules. In general the alignment of a struct is
+ determined by the greatest alignment of its elements. However, the
+ PowerPC rules cause the alignment of a struct to peg at word
+ alignment except when the first field has greater than word
+ (32-bit) alignment, in which case the alignment is determined by
+ the alignment of the first field. */
+
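+/* Illustration (an assumption about the classic Mac "power" rules this
+   implements):
+
+     struct A { double d; int i; };  -- first field is 8-byte aligned,
+        so the whole struct is 8-byte aligned;
+     struct B { int i; double d; };  -- alignment pegs at the 4-byte
+        word despite the double.  */
+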
+unsigned
+round_type_align (tree the_struct, unsigned computed, unsigned specified)
+{
+ if (TREE_CODE (the_struct) == VECTOR_TYPE
+ && ALTIVEC_VECTOR_MODE (TYPE_MODE (the_struct)))
+ {
+ /* All vectors are (at least) 16-byte aligned. A struct or
+ union with a vector element is also 16-byte aligned. */
+ return MAX (RS6000_VECTOR_ALIGNMENT, MAX (computed, specified));
+ }
+
+ if (TREE_CODE (the_struct) == RECORD_TYPE
+ || TREE_CODE (the_struct) == UNION_TYPE
+ || TREE_CODE (the_struct) == QUAL_UNION_TYPE)
+ {
+ tree first_field = TYPE_FIELDS (the_struct);
+
+ /* Skip past static fields, enums, and constant fields that are
+ not really a part of the record layout. */
+ while ((first_field != 0)
+ && (TREE_CODE (first_field) != FIELD_DECL))
+ first_field = TREE_CHAIN (first_field);
+
+ if (first_field != 0)
+ {
+ /* If other-than-default alignment (which includes mac68k
+ mode) is in effect, then no adjustments to the alignment
+ should be necessary. Ditto if the struct has the
+ __packed__ attribute. */
+ if (TYPE_PACKED (the_struct) || TARGET_ALIGN_MAC68K
+ || TARGET_ALIGN_NATURAL || maximum_field_alignment != 0)
+ /* Do nothing */ ;
+ else
+ {
+ /* The following code handles Macintosh PowerPC
+ alignment. The implementation is complicated by the
+ fact that BIGGEST_ALIGNMENT is 128 when AltiVec is
+ enabled and 32 when it is not. So when AltiVec is
+ not enabled, alignment is generally limited to word
+ alignment. Consequently, the alignment of unions has
+ to be recalculated if AltiVec is not enabled.
+
+ Below we explicitly test for fields with greater than
+ word alignment: doubles, long longs, and structs and
+ arrays with greater than word alignment. */
+ unsigned val;
+ tree field_type;
+
+ val = MAX (computed, specified);
+
+ if (TREE_CODE (the_struct) == UNION_TYPE && !TARGET_ALTIVEC)
+ {
+ tree field = first_field;
+
+ while (field != 0)
+ {
+ /* Don't consider statics, enums and constant fields
+ which are not really a part of the record. */
+ if (TREE_CODE (field) != FIELD_DECL)
+ {
+ field = TREE_CHAIN (field);
+ continue;
+ }
+ if (TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE)
+ field_type = get_inner_array_type (field);
+ else
+ field_type = TREE_TYPE (field);
+ val = MAX (TYPE_ALIGN (field_type), val);
+ if (FLOAT_TYPE_P (field_type)
+ && TYPE_MODE (field_type) == DFmode)
+ val = MAX (RS6000_DOUBLE_ALIGNMENT, val);
+ else if (INTEGRAL_TYPE_P (field_type)
+ && TYPE_MODE (field_type) == DImode)
+ val = MAX (RS6000_LONGLONG_ALIGNMENT, val);
+ field = TREE_CHAIN (field);
+ }
+ }
+ else
+ {
+ if (TREE_CODE (TREE_TYPE (first_field)) == ARRAY_TYPE)
+ field_type = get_inner_array_type (first_field);
+ else
+ field_type = TREE_TYPE (first_field);
+
+ if (field_type == error_mark_node)
+ return val;
+ val = MAX (TYPE_ALIGN (field_type), val);
+
+ if (FLOAT_TYPE_P (field_type)
+ && TYPE_MODE (field_type) == DFmode)
+ val = MAX (RS6000_DOUBLE_ALIGNMENT, val);
+ else if (INTEGRAL_TYPE_P (field_type)
+ && TYPE_MODE (field_type) == DImode)
+ val = MAX (RS6000_LONGLONG_ALIGNMENT, val);
+ }
+
+ return val;
+ }
+ } /* first_field != 0 */
+
+ /* Ensure all MAC68K structs are at least 16-bit aligned.
+ Unless the struct has __attribute__ ((packed)). */
+
+ if (TARGET_ALIGN_MAC68K && ! TYPE_PACKED (the_struct))
+ {
+ if (computed < 16)
+ computed = 16;
+ }
+ } /* RECORD_TYPE, etc */
+
+ return (MAX (computed, specified));
+}
+/* APPLE LOCAL end Macintosh alignment 2002-1-22 --ff */
+
#if TARGET_ELF
static unsigned int
rs6000_elf_section_type_flags (tree decl, const char *name, int reloc)
@@ -18142,8 +20436,24 @@ rs6000_xcoff_file_end (void)
static bool
rs6000_binds_local_p (tree decl)
{
- return default_binds_local_p_1 (decl, 0);
+ /* APPLE LOCAL begin kext treat vtables as overridable */
+ return default_binds_local_p_1 (decl,
+ flag_apple_kext && lang_hooks.vtable_p (decl));
+}
+/* APPLE LOCAL end kext treat vtables as overridable */
+
+/* APPLE LOCAL begin pragma reverse_bitfields */
+/* Pragma reverse_bitfields. For compatibility with CW. The feature
+ is not well defined by CW and produces code that is wrong in some
+ cases; bug-for-bug compatibility is nevertheless the requirement. */
+
+static bool
+rs6000_reverse_bitfields_p (tree record_type ATTRIBUTE_UNUSED)
+{
+ return darwin_reverse_bitfields;
}
+/* APPLE LOCAL end pragma reverse_bitfields */
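+/* Layout sketch (an assumption inferred from CW behavior, which this
+   deliberately mirrors, bugs included): for
+
+     struct { unsigned a : 4; unsigned b : 4; } s;
+
+   normal big-endian allocation places 'a' in the most significant
+   nibble of the byte; with reverse_bitfields the allocation order
+   flips, so 'a' occupies the least significant nibble.  */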
#endif
/* Compute a (partial) cost for rtx X. Return true if the complete
@@ -18605,128 +20915,6 @@ rs6000_complex_function_value (enum machine_mode mode)
return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
-/* Compose a PARALLEL for a darwin64 struct being returned by
- value. */
-
-static rtx
-rs6000_darwin64_function_value (CUMULATIVE_ARGS *cum, tree valtype)
-{
- tree f, ftype;
- rtx rvec[FIRST_PSEUDO_REGISTER], sub, roffset, suboff;
- int k = 0, bytepos, tot, elt, i, subbytepos;
- enum machine_mode fmode;
-
- switch (TREE_CODE (valtype))
- {
- case RECORD_TYPE:
- for (f = TYPE_FIELDS (valtype); f ; f = TREE_CHAIN (f))
- if (TREE_CODE (f) == FIELD_DECL)
- {
- ftype = TREE_TYPE (f);
- fmode = TYPE_MODE (ftype);
- bytepos = int_bit_position (f) / BITS_PER_UNIT;
- if (USE_FP_FOR_ARG_P (cum, fmode, ftype))
- {
- sub = gen_rtx_REG (fmode, cum->fregno++);
- cum->sysv_gregno++;
- }
- else if (USE_ALTIVEC_FOR_ARG_P (cum, fmode, ftype, 1))
- {
- sub = gen_rtx_REG (fmode, cum->vregno++);
- cum->sysv_gregno++;
- }
- else if (fmode == BLKmode
- && (TREE_CODE (ftype) == RECORD_TYPE
- || TREE_CODE (ftype) == ARRAY_TYPE))
- sub = rs6000_darwin64_function_value (cum, ftype);
- else
- sub = gen_rtx_REG (fmode, cum->sysv_gregno++);
- if (sub == NULL_RTX)
- return sub;
- else if (GET_CODE (sub) == PARALLEL)
- {
- for (i = 0; i < XVECLEN (sub, 0); i++)
- {
- rtx subsub = XVECEXP (sub, 0, i);
-
- suboff = XEXP (subsub, 1);
- subbytepos = INTVAL (suboff);
- subbytepos += bytepos;
- roffset = gen_rtx_CONST_INT (SImode, subbytepos);
- subsub = XEXP (subsub, 0);
- rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, subsub, roffset);
- }
- }
- else
- {
- roffset = gen_rtx_CONST_INT (SImode, bytepos);
- rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, sub, roffset);
- }
- }
- if (k > 0)
- return gen_rtx_PARALLEL (TYPE_MODE (valtype), gen_rtvec_v (k, rvec));
- else
- return NULL_RTX;
-
- case ARRAY_TYPE:
- /* If passing by value won't work, give up. */
- if (int_size_in_bytes (valtype) <= 0)
- return NULL_RTX;
- ftype = TREE_TYPE (valtype);
- fmode = TYPE_MODE (ftype);
- tot = int_size_in_bytes (valtype) / int_size_in_bytes (ftype);
- bytepos = 0;
- for (elt = 0; elt < tot; ++elt)
- {
- if (USE_FP_FOR_ARG_P (cum, fmode, ftype))
- {
- sub = gen_rtx_REG (fmode, cum->fregno++);
- cum->sysv_gregno++;
- }
- else if (USE_ALTIVEC_FOR_ARG_P (cum, fmode, ftype, 1))
- {
- sub = gen_rtx_REG (fmode, cum->vregno++);
- cum->sysv_gregno++;
- }
- else if (fmode == BLKmode
- && (TREE_CODE (ftype) == RECORD_TYPE
- || TREE_CODE (ftype) == ARRAY_TYPE))
- sub = rs6000_darwin64_function_value (cum, ftype);
- else
- sub = gen_rtx_REG (fmode, cum->sysv_gregno++);
- if (sub == NULL_RTX)
- return sub;
- else if (GET_CODE (sub) == PARALLEL)
- {
- for (i = 0; i < XVECLEN (sub, 0); i++)
- {
- rtx subsub = XVECEXP (sub, 0, i);
-
- suboff = XEXP (subsub, 1);
- subbytepos = INTVAL (suboff);
- subbytepos += bytepos;
- roffset = gen_rtx_CONST_INT (SImode, subbytepos);
- subsub = XEXP (subsub, 0);
- rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, subsub, roffset);
- }
- }
- else
- {
- roffset = gen_rtx_CONST_INT (SImode, bytepos);
- rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, sub, roffset);
- }
- bytepos += int_size_in_bytes (ftype);
- }
- if (k > 0)
- return gen_rtx_PARALLEL (TYPE_MODE (valtype), gen_rtvec_v (k, rvec));
- else
- return NULL_RTX;
-
- default:
- abort ();
- }
-}
-
/* Define how to find the value returned by a function.
VALTYPE is the data type of the value (as a tree).
If the precise function being called is known, FUNC is its FUNCTION_DECL;
@@ -18746,16 +20934,18 @@ rs6000_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
/* Special handling for structs in darwin64. */
if (rs6000_darwin64_abi
&& TYPE_MODE (valtype) == BLKmode
- && (TREE_CODE (valtype) == RECORD_TYPE
- || TREE_CODE (valtype) == ARRAY_TYPE))
+ && TREE_CODE (valtype) == RECORD_TYPE
+ && int_size_in_bytes (valtype) > 0)
{
CUMULATIVE_ARGS valcum;
rtx valret;
- valcum.sysv_gregno = GP_ARG_RETURN;
+ valcum.words = 0;
valcum.fregno = FP_ARG_MIN_REG;
valcum.vregno = ALTIVEC_ARG_MIN_REG;
- valret = rs6000_darwin64_function_value (&valcum, valtype);
+ /* Do a trial code generation as if this were going to be passed as
+ an argument; if any part goes in memory, we return NULL. */
+ valret = rs6000_darwin64_record_arg (&valcum, valtype, 1, true);
if (valret)
return valret;
/* Otherwise fall through to standard ABI rules. */
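/* Illustration (the register assignment is an assumption about the
   trial): struct { double a, b; } is small enough that every piece
   lands in registers (f1/f2), so the trial succeeds; a struct whose
   pieces would spill to memory makes the trial return NULL and the
   standard hidden-pointer return is used instead. */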
@@ -18934,6 +21124,43 @@ rs6000_dbx_register_number (unsigned int regno)
abort ();
}
+/* APPLE LOCAL begin CW asm blocks */
+/* Translate some register names seen in CW asm into GCC standard
+ forms. */
+
+const char *
+rs6000_cw_asm_register_name (const char *regname, char *buf)
+{
+ /* SP is a valid reg name, but asm doesn't like it yet, so translate. */
+ if (strcmp (regname, "sp") == 0)
+ return "r1";
+ if (decode_reg_name (regname) >= 0)
+ return regname;
+ /* Change "gpr0" to "r0". */
+ if (regname[0] == 'g'
+ && regname[1] == 'p'
+ && decode_reg_name (regname + 2) >= 0)
+ return regname + 2;
+ /* Change "fp0" to "f0". */
+ if (regname[0] == 'f' && regname[1] == 'p')
+ {
+ buf[0] = 'f';
+ strcpy (buf + 1, regname + 2);
+ if (decode_reg_name (buf) >= 0)
+ return buf;
+ }
+ if (regname[0] == 's'
+ && regname[1] == 'p'
+ && regname[2] == 'r')
+ /* Temp hack, return it as a number. */
+ return regname + 3;
+ if (strcmp (regname, "RTOC") == 0)
+ return "r2";
+ return NULL;
+}
+/* APPLE LOCAL end CW asm blocks */
+
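+/* Example translations (illustrative; assumes decode_reg_name accepts
+   the standard rs6000 spellings):
+     "sp"   -> "r1"     "gpr5" -> "r5"     "fp2" -> "f2"
+     "spr9" -> "9"      "RTOC" -> "r2"     "r3"  -> "r3"
+   and anything unrecognized -> NULL.  */
+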
/* target hook eh_return_filter_mode */
static enum machine_mode
rs6000_eh_return_filter_mode (void)
@@ -18955,5 +21182,21 @@ rs6000_vector_mode_supported_p (enum machine_mode mode)
else
return false;
}
+/* APPLE LOCAL begin mainline 2005-04-14 */
+
+/* Target hook for invalid_arg_for_unprototyped_fn. */
+static const char *
+invalid_arg_for_unprototyped_fn (tree typelist, tree funcdecl, tree val)
+{
+ return (!rs6000_darwin64_abi
+ && typelist == 0
+ && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
+ && (funcdecl == NULL_TREE
+ || (TREE_CODE (funcdecl) == FUNCTION_DECL
+ && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
+ ? N_("AltiVec argument passed to unprototyped function")
+ : NULL;
+}
+/* APPLE LOCAL end mainline 2005-04-14 */
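+/* Trigger sketch (assumes -maltivec and a non-darwin64 ABI):
+     vector int v;
+     extern void f ();   -- unprototyped
+     f (v);              -- warns "AltiVec argument passed to
+                            unprototyped function"
+   MD builtins are exempt, as is any call through a real prototype.  */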
#include "gt-rs6000.h"
diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h
index 1a2d12df2ce..902b96a202d 100644
--- a/gcc/config/rs6000/rs6000.h
+++ b/gcc/config/rs6000/rs6000.h
@@ -205,6 +205,11 @@ extern int target_flags;
0x00100000, and sysv4.h uses 0x00800000 -> 0x40000000.
0x80000000 is not available because target_flags is signed. */
+/* APPLE LOCAL begin long-branch */
+/* gen call addr in register for >64M range */
+#define MASK_LONG_BRANCH 0x00200000
+/* APPLE LOCAL end long-branch */
+
#define TARGET_POWER (target_flags & MASK_POWER)
#define TARGET_POWER2 (target_flags & MASK_POWER2)
#define TARGET_POWERPC (target_flags & MASK_POWERPC)
@@ -223,6 +228,8 @@ extern int target_flags;
#define TARGET_SCHED_PROLOG (target_flags & MASK_SCHED_PROLOG)
#define TARGET_ALTIVEC (target_flags & MASK_ALTIVEC)
#define TARGET_AIX_STRUCT_RET (target_flags & MASK_AIX_STRUCT_RET)
+/* APPLE LOCAL long-branch */
+#define TARGET_LONG_BRANCH (target_flags & MASK_LONG_BRANCH)
/* Define TARGET_MFCRF if the target assembler supports the optional
field operand for mfcr and the target processor supports the
@@ -468,6 +475,11 @@ enum group_termination
{"longcall", &rs6000_longcall_switch, \
N_("Avoid all range limits on call instructions"), 0}, \
{"no-longcall", &rs6000_longcall_switch, "", 0}, \
+ /* APPLE LOCAL begin long-branch */ \
+ {"long-branch", &rs6000_longcall_switch, \
+ N_("Avoid all range limits on call instructions"), 0}, \
+ {"no-long-branch", &rs6000_longcall_switch, "", 0}, \
+ /* APPLE LOCAL end long-branch */ \
{"warn-altivec-long", &rs6000_warn_altivec_long_switch, \
N_("Warn about deprecated 'vector long ...' AltiVec type usage"), 0}, \
{"no-warn-altivec-long", &rs6000_warn_altivec_long_switch, "", 0}, \
@@ -479,6 +491,11 @@ enum group_termination
N_("Specify alignment of structure fields default/natural"), 0}, \
{"prioritize-restricted-insns=", &rs6000_sched_restricted_insns_priority_str, \
N_("Specify scheduling priority for dispatch slot restricted insns"), 0}, \
+ /* APPLE LOCAL begin AltiVec */ \
+ {"pim-altivec", &rs6000_altivec_pim_switch, \
+ N_("Enable use of Motorola AltiVec PIM operations and predicates"), 0}, \
+ {"no-pim-altivec", &rs6000_altivec_pim_switch, "", 0}, \
+ /* APPLE LOCAL end AltiVec */ \
SUBTARGET_OPTIONS \
}
@@ -541,6 +558,10 @@ extern enum rs6000_nop_insertion rs6000_sched_insert_nops;
extern int rs6000_warn_altivec_long;
extern const char *rs6000_warn_altivec_long_switch;
+/* APPLE LOCAL begin AltiVec */
+extern int rs6000_altivec_pim;
+extern const char *rs6000_altivec_pim_switch;
+/* APPLE LOCAL end AltiVec */
/* Alignment options for fields in structures for sub-targets following
AIX-like ABI.
@@ -555,6 +576,10 @@ extern const char *rs6000_warn_altivec_long_switch;
#define MASK_ALIGN_POWER 0x00000000
#define MASK_ALIGN_NATURAL 0x00000001
#define TARGET_ALIGN_NATURAL (rs6000_alignment_flags & MASK_ALIGN_NATURAL)
+/* APPLE LOCAL begin Macintosh alignment 2002-2-26 --ff */
+#define MASK_ALIGN_MAC68K 0x00000002
+#define TARGET_ALIGN_MAC68K (rs6000_alignment_flags & MASK_ALIGN_MAC68K)
+/* APPLE LOCAL end Macintosh alignment 2002-2-26 --ff */
#else
#define TARGET_ALIGN_NATURAL 0
#endif
@@ -734,6 +759,13 @@ extern const char *rs6000_warn_altivec_long_switch;
/* Allocation boundary (in *bits*) for the code of a function. */
#define FUNCTION_BOUNDARY 32
+/* APPLE LOCAL begin Macintosh alignment */
+/* Constants for alignment macros below. */
+#define RS6000_DOUBLE_ALIGNMENT 64
+#define RS6000_LONGLONG_ALIGNMENT 64
+#define RS6000_VECTOR_ALIGNMENT 128
+/* APPLE LOCAL end Macintosh alignment */
+
/* No data type wants to be aligned rounder than this. */
#define BIGGEST_ALIGNMENT 128
@@ -1660,6 +1692,10 @@ typedef struct machine_function GTY(())
{
/* Flags if __builtin_return_address (n) with n >= 1 was used. */
int ra_needs_full_frame;
+ /* APPLE LOCAL begin volatile pic base reg in leaves */
+ /* Substitute PIC register in leaf functions */
+ unsigned int substitute_pic_base_reg;
+ /* APPLE LOCAL end volatile pic base reg in leaves */
/* Some local-dynamic symbol. */
const char *some_ld_name;
/* Whether the instruction chain has been scanned already. */
@@ -1696,6 +1732,9 @@ typedef struct rs6000_args
int stdarg; /* Whether function is a stdarg function. */
int call_cookie; /* Do special things for this call */
int sysv_gregno; /* next available GP register */
+ int intoffset; /* running offset in struct (darwin64) */
+ int use_stack; /* any part of struct on stack (darwin64) */
+ int named; /* false for varargs params */
} CUMULATIVE_ARGS;
/* Initialize a variable CUM of type CUMULATIVE_ARGS
@@ -2512,6 +2551,10 @@ extern char rs6000_reg_names[][8]; /* register names (0 vs. %r0). */
if ((LOG) != 0) \
fprintf (FILE, "\t.align %d\n", (LOG))
+/* APPLE LOCAL begin CW asm blocks */
+#define CW_ASM_REGISTER_NAME(STR, BUF) rs6000_cw_asm_register_name (STR, BUF)
+/* APPLE LOCAL end CW asm blocks */
+
/* Pick up the return address upon entry to a procedure. Used for
dwarf2 unwind information. This also enables the table driven
mechanism. */
@@ -2626,8 +2669,8 @@ extern char rs6000_reg_names[][8]; /* register names (0 vs. %r0). */
/* General flags. */
extern int flag_pic;
-extern int optimize;
-extern int flag_expensive_optimizations;
+/* APPLE LOCAL begin optimization pragmas 3124235/3420242 */
+/* APPLE LOCAL end optimization pragmas 3124235/3420242 */
extern int frame_pointer_needed;
enum rs6000_builtins
@@ -3071,5 +3114,442 @@ enum rs6000_builtins
SPE_BUILTIN_EVMWHGUMIAN,
SPE_BUILTIN_MTSPEFSCR,
SPE_BUILTIN_MFSPEFSCR,
- SPE_BUILTIN_BRINC
+ /* APPLE LOCAL begin AltiVec */
+ SPE_BUILTIN_BRINC,
+
+ /* AltiVec PIM functions, used in Apple AltiVec mode. */
+ ALTIVEC_PIM__FIRST,
+
+ /* PIM Operations. */
+ ALTIVEC_PIM_VEC_ABS = ALTIVEC_PIM__FIRST,
+ ALTIVEC_PIM_VEC_ABS_2,
+ ALTIVEC_PIM_VEC_ABS_3,
+ ALTIVEC_PIM_VEC_ABS_4,
+ ALTIVEC_PIM_VEC_ABSS,
+ ALTIVEC_PIM_VEC_ABSS_2,
+ ALTIVEC_PIM_VEC_ABSS_3,
+ ALTIVEC_PIM_VEC_ADD,
+ ALTIVEC_PIM_VEC_ADD_2,
+ ALTIVEC_PIM_VEC_ADD_3,
+ ALTIVEC_PIM_VEC_ADD_4,
+ ALTIVEC_PIM_VEC_ADDC,
+ ALTIVEC_PIM_VEC_ADDS,
+ ALTIVEC_PIM_VEC_ADDS_2,
+ ALTIVEC_PIM_VEC_ADDS_3,
+ ALTIVEC_PIM_VEC_ADDS_4,
+ ALTIVEC_PIM_VEC_ADDS_5,
+ ALTIVEC_PIM_VEC_ADDS_6,
+ ALTIVEC_PIM_VEC_AND,
+ ALTIVEC_PIM_VEC_ANDC,
+ ALTIVEC_PIM_VEC_AVG,
+ ALTIVEC_PIM_VEC_AVG_2,
+ ALTIVEC_PIM_VEC_AVG_3,
+ ALTIVEC_PIM_VEC_AVG_4,
+ ALTIVEC_PIM_VEC_AVG_5,
+ ALTIVEC_PIM_VEC_AVG_6,
+ ALTIVEC_PIM_VEC_CEIL,
+ ALTIVEC_PIM_VEC_CMPB,
+ ALTIVEC_PIM_VEC_CMPEQ,
+ ALTIVEC_PIM_VEC_CMPEQ_2,
+ ALTIVEC_PIM_VEC_CMPEQ_3,
+ ALTIVEC_PIM_VEC_CMPEQ_4,
+ ALTIVEC_PIM_VEC_CMPGE,
+ ALTIVEC_PIM_VEC_CMPGT,
+ ALTIVEC_PIM_VEC_CMPGT_2,
+ ALTIVEC_PIM_VEC_CMPGT_3,
+ ALTIVEC_PIM_VEC_CMPGT_4,
+ ALTIVEC_PIM_VEC_CMPGT_5,
+ ALTIVEC_PIM_VEC_CMPGT_6,
+ ALTIVEC_PIM_VEC_CMPGT_7,
+ ALTIVEC_PIM_VEC_CMPLE,
+ ALTIVEC_PIM_VEC_CMPLT,
+ ALTIVEC_PIM_VEC_CMPLT_2,
+ ALTIVEC_PIM_VEC_CMPLT_3,
+ ALTIVEC_PIM_VEC_CMPLT_4,
+ ALTIVEC_PIM_VEC_CMPLT_5,
+ ALTIVEC_PIM_VEC_CMPLT_6,
+ ALTIVEC_PIM_VEC_CMPLT_7,
+ ALTIVEC_PIM_VEC_CTF,
+ ALTIVEC_PIM_VEC_CTF_2,
+ ALTIVEC_PIM_VEC_CTS,
+ ALTIVEC_PIM_VEC_CTU,
+ ALTIVEC_PIM_VEC_DSS,
+ ALTIVEC_PIM_VEC_DSSALL,
+ ALTIVEC_PIM_VEC_DST,
+ ALTIVEC_PIM_VEC_DSTST,
+ ALTIVEC_PIM_VEC_DSTSTT,
+ ALTIVEC_PIM_VEC_DSTT,
+ ALTIVEC_PIM_VEC_EXPTE,
+ ALTIVEC_PIM_VEC_FLOOR,
+ ALTIVEC_PIM_VEC_LD,
+ ALTIVEC_PIM_VEC_LDE,
+ ALTIVEC_PIM_VEC_LDE_2,
+ ALTIVEC_PIM_VEC_LDE_3,
+ ALTIVEC_PIM_VEC_LDL,
+ ALTIVEC_PIM_VEC_LOGE,
+ ALTIVEC_PIM_VEC_LVEBX,
+ ALTIVEC_PIM_VEC_LVEHX,
+ ALTIVEC_PIM_VEC_LVEWX,
+ ALTIVEC_PIM_VEC_LVSL,
+ ALTIVEC_PIM_VEC_LVSR,
+ ALTIVEC_PIM_VEC_LVX,
+ ALTIVEC_PIM_VEC_LVXL,
+ ALTIVEC_PIM_VEC_MADD,
+ ALTIVEC_PIM_VEC_MADDS,
+ ALTIVEC_PIM_VEC_MAX,
+ ALTIVEC_PIM_VEC_MAX_2,
+ ALTIVEC_PIM_VEC_MAX_3,
+ ALTIVEC_PIM_VEC_MAX_4,
+ ALTIVEC_PIM_VEC_MAX_5,
+ ALTIVEC_PIM_VEC_MAX_6,
+ ALTIVEC_PIM_VEC_MAX_7,
+ ALTIVEC_PIM_VEC_MERGEH,
+ ALTIVEC_PIM_VEC_MERGEH_2,
+ ALTIVEC_PIM_VEC_MERGEH_3,
+ ALTIVEC_PIM_VEC_MERGEL,
+ ALTIVEC_PIM_VEC_MERGEL_2,
+ ALTIVEC_PIM_VEC_MERGEL_3,
+ ALTIVEC_PIM_VEC_MFVSCR,
+ ALTIVEC_PIM_VEC_MIN,
+ ALTIVEC_PIM_VEC_MIN_2,
+ ALTIVEC_PIM_VEC_MIN_3,
+ ALTIVEC_PIM_VEC_MIN_4,
+ ALTIVEC_PIM_VEC_MIN_5,
+ ALTIVEC_PIM_VEC_MIN_6,
+ ALTIVEC_PIM_VEC_MIN_7,
+ ALTIVEC_PIM_VEC_MLADD,
+ ALTIVEC_PIM_VEC_MLADD_2,
+ ALTIVEC_PIM_VEC_MRADDS,
+ ALTIVEC_PIM_VEC_MSUM,
+ ALTIVEC_PIM_VEC_MSUM_2,
+ ALTIVEC_PIM_VEC_MSUM_3,
+ ALTIVEC_PIM_VEC_MSUM_4,
+ ALTIVEC_PIM_VEC_MSUMS,
+ ALTIVEC_PIM_VEC_MSUMS_2,
+ ALTIVEC_PIM_VEC_MTVSCR,
+ ALTIVEC_PIM_VEC_MULE,
+ ALTIVEC_PIM_VEC_MULE_2,
+ ALTIVEC_PIM_VEC_MULE_3,
+ ALTIVEC_PIM_VEC_MULE_4,
+ ALTIVEC_PIM_VEC_MULO,
+ ALTIVEC_PIM_VEC_MULO_2,
+ ALTIVEC_PIM_VEC_MULO_3,
+ ALTIVEC_PIM_VEC_MULO_4,
+ ALTIVEC_PIM_VEC_NMSUB,
+ ALTIVEC_PIM_VEC_NOR,
+ ALTIVEC_PIM_VEC_OR,
+ ALTIVEC_PIM_VEC_PACK,
+ ALTIVEC_PIM_VEC_PACK_2,
+ ALTIVEC_PIM_VEC_PACKPX,
+ ALTIVEC_PIM_VEC_PACKS,
+ ALTIVEC_PIM_VEC_PACKS_2,
+ ALTIVEC_PIM_VEC_PACKS_3,
+ ALTIVEC_PIM_VEC_PACKS_4,
+ ALTIVEC_PIM_VEC_PACKSU,
+ ALTIVEC_PIM_VEC_PACKSU_2,
+ ALTIVEC_PIM_VEC_PACKSU_3,
+ ALTIVEC_PIM_VEC_PACKSU_4,
+ ALTIVEC_PIM_VEC_PERM,
+ ALTIVEC_PIM_VEC_RE,
+ ALTIVEC_PIM_VEC_RL,
+ ALTIVEC_PIM_VEC_RL_2,
+ ALTIVEC_PIM_VEC_RL_3,
+ ALTIVEC_PIM_VEC_ROUND,
+ ALTIVEC_PIM_VEC_RSQRTE,
+ ALTIVEC_PIM_VEC_SEL,
+ ALTIVEC_PIM_VEC_SL,
+ ALTIVEC_PIM_VEC_SL_2,
+ ALTIVEC_PIM_VEC_SL_3,
+ ALTIVEC_PIM_VEC_SLD,
+ ALTIVEC_PIM_VEC_SLL,
+ ALTIVEC_PIM_VEC_SLO,
+ ALTIVEC_PIM_VEC_SPLAT,
+ ALTIVEC_PIM_VEC_SPLAT_2,
+ ALTIVEC_PIM_VEC_SPLAT_3,
+ ALTIVEC_PIM_VEC_SPLAT_S8,
+ ALTIVEC_PIM_VEC_SPLAT_S16,
+ ALTIVEC_PIM_VEC_SPLAT_S32,
+ ALTIVEC_PIM_VEC_SPLAT_U8,
+ ALTIVEC_PIM_VEC_SPLAT_U16,
+ ALTIVEC_PIM_VEC_SPLAT_U32,
+ ALTIVEC_PIM_VEC_SR,
+ ALTIVEC_PIM_VEC_SR_2,
+ ALTIVEC_PIM_VEC_SR_3,
+ ALTIVEC_PIM_VEC_SRA,
+ ALTIVEC_PIM_VEC_SRA_2,
+ ALTIVEC_PIM_VEC_SRA_3,
+ ALTIVEC_PIM_VEC_SRL,
+ ALTIVEC_PIM_VEC_SRO,
+ ALTIVEC_PIM_VEC_ST,
+ ALTIVEC_PIM_VEC_STE,
+ ALTIVEC_PIM_VEC_STE_2,
+ ALTIVEC_PIM_VEC_STE_3,
+ ALTIVEC_PIM_VEC_STL,
+ ALTIVEC_PIM_VEC_STVEBX,
+ ALTIVEC_PIM_VEC_STVEHX,
+ ALTIVEC_PIM_VEC_STVEWX,
+ ALTIVEC_PIM_VEC_STVX,
+ ALTIVEC_PIM_VEC_STVXL,
+ ALTIVEC_PIM_VEC_SUB,
+ ALTIVEC_PIM_VEC_SUB_2,
+ ALTIVEC_PIM_VEC_SUB_3,
+ ALTIVEC_PIM_VEC_SUB_4,
+ ALTIVEC_PIM_VEC_SUBC,
+ ALTIVEC_PIM_VEC_SUBS,
+ ALTIVEC_PIM_VEC_SUBS_2,
+ ALTIVEC_PIM_VEC_SUBS_3,
+ ALTIVEC_PIM_VEC_SUBS_4,
+ ALTIVEC_PIM_VEC_SUBS_5,
+ ALTIVEC_PIM_VEC_SUBS_6,
+ ALTIVEC_PIM_VEC_SUM4S,
+ ALTIVEC_PIM_VEC_SUM4S_2,
+ ALTIVEC_PIM_VEC_SUM4S_3,
+ ALTIVEC_PIM_VEC_SUM2S,
+ ALTIVEC_PIM_VEC_SUMS,
+ ALTIVEC_PIM_VEC_TRUNC,
+ ALTIVEC_PIM_VEC_UNPACKH,
+ ALTIVEC_PIM_VEC_UNPACKH_2,
+ ALTIVEC_PIM_VEC_UNPACKH_3,
+ ALTIVEC_PIM_VEC_UNPACKL,
+ ALTIVEC_PIM_VEC_UNPACKL_2,
+ ALTIVEC_PIM_VEC_UNPACKL_3,
+ ALTIVEC_PIM_VEC_VADDCUW,
+ ALTIVEC_PIM_VEC_VADDFP,
+ ALTIVEC_PIM_VEC_VADDSBS,
+ ALTIVEC_PIM_VEC_VADDSHS,
+ ALTIVEC_PIM_VEC_VADDSWS,
+ ALTIVEC_PIM_VEC_VADDUBM,
+ ALTIVEC_PIM_VEC_VADDUBS,
+ ALTIVEC_PIM_VEC_VADDUHM,
+ ALTIVEC_PIM_VEC_VADDUHS,
+ ALTIVEC_PIM_VEC_VADDUWM,
+ ALTIVEC_PIM_VEC_VADDUWS,
+ ALTIVEC_PIM_VEC_VAND,
+ ALTIVEC_PIM_VEC_VANDC,
+ ALTIVEC_PIM_VEC_VAVGSB,
+ ALTIVEC_PIM_VEC_VAVGSH,
+ ALTIVEC_PIM_VEC_VAVGSW,
+ ALTIVEC_PIM_VEC_VAVGUB,
+ ALTIVEC_PIM_VEC_VAVGUH,
+ ALTIVEC_PIM_VEC_VAVGUW,
+ ALTIVEC_PIM_VEC_VCFSX,
+ ALTIVEC_PIM_VEC_VCFUX,
+ ALTIVEC_PIM_VEC_VCMPBFP,
+ ALTIVEC_PIM_VEC_VCMPEQFP,
+ ALTIVEC_PIM_VEC_VCMPEQUB,
+ ALTIVEC_PIM_VEC_VCMPEQUH,
+ ALTIVEC_PIM_VEC_VCMPEQUW,
+ ALTIVEC_PIM_VEC_VCMPGEFP,
+ ALTIVEC_PIM_VEC_VCMPGTFP,
+ ALTIVEC_PIM_VEC_VCMPGTSB,
+ ALTIVEC_PIM_VEC_VCMPGTSH,
+ ALTIVEC_PIM_VEC_VCMPGTSW,
+ ALTIVEC_PIM_VEC_VCMPGTUB,
+ ALTIVEC_PIM_VEC_VCMPGTUH,
+ ALTIVEC_PIM_VEC_VCMPGTUW,
+ ALTIVEC_PIM_VEC_VCTSXS,
+ ALTIVEC_PIM_VEC_VCTUXS,
+ ALTIVEC_PIM_VEC_VEXPTEFP,
+ ALTIVEC_PIM_VEC_VLOGEFP,
+ ALTIVEC_PIM_VEC_VMADDFP,
+ ALTIVEC_PIM_VEC_VMAXFP,
+ ALTIVEC_PIM_VEC_VMAXSB,
+ ALTIVEC_PIM_VEC_VMAXSH,
+ ALTIVEC_PIM_VEC_VMAXSW,
+ ALTIVEC_PIM_VEC_VMAXUB,
+ ALTIVEC_PIM_VEC_VMAXUH,
+ ALTIVEC_PIM_VEC_VMAXUW,
+ ALTIVEC_PIM_VEC_VMHADDSHS,
+ ALTIVEC_PIM_VEC_VMHRADDSHS,
+ ALTIVEC_PIM_VEC_VMINFP,
+ ALTIVEC_PIM_VEC_VMINSB,
+ ALTIVEC_PIM_VEC_VMINSH,
+ ALTIVEC_PIM_VEC_VMINSW,
+ ALTIVEC_PIM_VEC_VMINUB,
+ ALTIVEC_PIM_VEC_VMINUH,
+ ALTIVEC_PIM_VEC_VMINUW,
+ ALTIVEC_PIM_VEC_VMLADDUHM,
+ ALTIVEC_PIM_VEC_VMRGHB,
+ ALTIVEC_PIM_VEC_VMRGHH,
+ ALTIVEC_PIM_VEC_VMRGHW,
+ ALTIVEC_PIM_VEC_VMRGLB,
+ ALTIVEC_PIM_VEC_VMRGLH,
+ ALTIVEC_PIM_VEC_VMRGLW,
+ ALTIVEC_PIM_VEC_VMSUMMBM,
+ ALTIVEC_PIM_VEC_VMSUMSHM,
+ ALTIVEC_PIM_VEC_VMSUMSHS,
+ ALTIVEC_PIM_VEC_VMSUMUBM,
+ ALTIVEC_PIM_VEC_VMSUMUHM,
+ ALTIVEC_PIM_VEC_VMSUMUHS,
+ ALTIVEC_PIM_VEC_VMULESB,
+ ALTIVEC_PIM_VEC_VMULESH,
+ ALTIVEC_PIM_VEC_VMULEUB,
+ ALTIVEC_PIM_VEC_VMULEUH,
+ ALTIVEC_PIM_VEC_VMULOSB,
+ ALTIVEC_PIM_VEC_VMULOSH,
+ ALTIVEC_PIM_VEC_VMULOUB,
+ ALTIVEC_PIM_VEC_VMULOUH,
+ ALTIVEC_PIM_VEC_VNMSUBFP,
+ ALTIVEC_PIM_VEC_VNOR,
+ ALTIVEC_PIM_VEC_VOR,
+ ALTIVEC_PIM_VEC_VPERM,
+ ALTIVEC_PIM_VEC_VPKPX,
+ ALTIVEC_PIM_VEC_VPKSHSS,
+ ALTIVEC_PIM_VEC_VPKSHUS,
+ ALTIVEC_PIM_VEC_VPKSWSS,
+ ALTIVEC_PIM_VEC_VPKSWUS,
+ ALTIVEC_PIM_VEC_VPKUHUM,
+ ALTIVEC_PIM_VEC_VPKUHUS,
+ ALTIVEC_PIM_VEC_VPKUWUM,
+ ALTIVEC_PIM_VEC_VPKUWUS,
+ ALTIVEC_PIM_VEC_VREFP,
+ ALTIVEC_PIM_VEC_VRFIM,
+ ALTIVEC_PIM_VEC_VRFIN,
+ ALTIVEC_PIM_VEC_VRFIP,
+ ALTIVEC_PIM_VEC_VRFIZ,
+ ALTIVEC_PIM_VEC_VRLB,
+ ALTIVEC_PIM_VEC_VRLH,
+ ALTIVEC_PIM_VEC_VRLW,
+ ALTIVEC_PIM_VEC_VRSQRTEFP,
+ ALTIVEC_PIM_VEC_VSEL,
+ ALTIVEC_PIM_VEC_VSL,
+ ALTIVEC_PIM_VEC_VSLB,
+ ALTIVEC_PIM_VEC_VSLDOI,
+ ALTIVEC_PIM_VEC_VSLH,
+ ALTIVEC_PIM_VEC_VSLO,
+ ALTIVEC_PIM_VEC_VSLW,
+ ALTIVEC_PIM_VEC_VSPLTB,
+ ALTIVEC_PIM_VEC_VSPLTH,
+ ALTIVEC_PIM_VEC_VSPLTISB,
+ ALTIVEC_PIM_VEC_VSPLTISH,
+ ALTIVEC_PIM_VEC_VSPLTISW,
+ ALTIVEC_PIM_VEC_VSPLTW,
+ ALTIVEC_PIM_VEC_VSR,
+ ALTIVEC_PIM_VEC_VSRAB,
+ ALTIVEC_PIM_VEC_VSRAH,
+ ALTIVEC_PIM_VEC_VSRAW,
+ ALTIVEC_PIM_VEC_VSRB,
+ ALTIVEC_PIM_VEC_VSRH,
+ ALTIVEC_PIM_VEC_VSRO,
+ ALTIVEC_PIM_VEC_VSRW,
+ ALTIVEC_PIM_VEC_VSUBCUW,
+ ALTIVEC_PIM_VEC_VSUBFP,
+ ALTIVEC_PIM_VEC_VSUBSBS,
+ ALTIVEC_PIM_VEC_VSUBSHS,
+ ALTIVEC_PIM_VEC_VSUBSWS,
+ ALTIVEC_PIM_VEC_VSUBUBM,
+ ALTIVEC_PIM_VEC_VSUBUBS,
+ ALTIVEC_PIM_VEC_VSUBUHM,
+ ALTIVEC_PIM_VEC_VSUBUHS,
+ ALTIVEC_PIM_VEC_VSUBUWM,
+ ALTIVEC_PIM_VEC_VSUBUWS,
+ ALTIVEC_PIM_VEC_VSUM4SBS,
+ ALTIVEC_PIM_VEC_VSUM4SHS,
+ ALTIVEC_PIM_VEC_VSUM4UBS,
+ ALTIVEC_PIM_VEC_VSUM2SWS,
+ ALTIVEC_PIM_VEC_VSUMSWS,
+ ALTIVEC_PIM_VEC_VUPKHPX,
+ ALTIVEC_PIM_VEC_VUPKHSB,
+ ALTIVEC_PIM_VEC_VUPKHSH,
+ ALTIVEC_PIM_VEC_VUPKLPX,
+ ALTIVEC_PIM_VEC_VUPKLSB,
+ ALTIVEC_PIM_VEC_VUPKLSH,
+ ALTIVEC_PIM_VEC_VXOR,
+ ALTIVEC_PIM_VEC_XOR,
+
+ /* PIM Predicates. */
+ ALTIVEC_PIM_VEC_ALL_EQ,
+ ALTIVEC_PIM_VEC_ALL_EQ_2,
+ ALTIVEC_PIM_VEC_ALL_EQ_3,
+ ALTIVEC_PIM_VEC_ALL_EQ_4,
+ ALTIVEC_PIM_VEC_ALL_GE,
+ ALTIVEC_PIM_VEC_ALL_GE_2,
+ ALTIVEC_PIM_VEC_ALL_GE_3,
+ ALTIVEC_PIM_VEC_ALL_GE_4,
+ ALTIVEC_PIM_VEC_ALL_GE_5,
+ ALTIVEC_PIM_VEC_ALL_GE_6,
+ ALTIVEC_PIM_VEC_ALL_GE_7,
+ ALTIVEC_PIM_VEC_ALL_GT,
+ ALTIVEC_PIM_VEC_ALL_GT_2,
+ ALTIVEC_PIM_VEC_ALL_GT_3,
+ ALTIVEC_PIM_VEC_ALL_GT_4,
+ ALTIVEC_PIM_VEC_ALL_GT_5,
+ ALTIVEC_PIM_VEC_ALL_GT_6,
+ ALTIVEC_PIM_VEC_ALL_GT_7,
+ ALTIVEC_PIM_VEC_ALL_IN,
+ ALTIVEC_PIM_VEC_ALL_LE,
+ ALTIVEC_PIM_VEC_ALL_LE_2,
+ ALTIVEC_PIM_VEC_ALL_LE_3,
+ ALTIVEC_PIM_VEC_ALL_LE_4,
+ ALTIVEC_PIM_VEC_ALL_LE_5,
+ ALTIVEC_PIM_VEC_ALL_LE_6,
+ ALTIVEC_PIM_VEC_ALL_LE_7,
+ ALTIVEC_PIM_VEC_ALL_LT,
+ ALTIVEC_PIM_VEC_ALL_LT_2,
+ ALTIVEC_PIM_VEC_ALL_LT_3,
+ ALTIVEC_PIM_VEC_ALL_LT_4,
+ ALTIVEC_PIM_VEC_ALL_LT_5,
+ ALTIVEC_PIM_VEC_ALL_LT_6,
+ ALTIVEC_PIM_VEC_ALL_LT_7,
+ ALTIVEC_PIM_VEC_ALL_NAN,
+ ALTIVEC_PIM_VEC_ALL_NE,
+ ALTIVEC_PIM_VEC_ALL_NE_2,
+ ALTIVEC_PIM_VEC_ALL_NE_3,
+ ALTIVEC_PIM_VEC_ALL_NE_4,
+ ALTIVEC_PIM_VEC_ALL_NGE,
+ ALTIVEC_PIM_VEC_ALL_NGT,
+ ALTIVEC_PIM_VEC_ALL_NLE,
+ ALTIVEC_PIM_VEC_ALL_NLT,
+ ALTIVEC_PIM_VEC_ALL_NUMERIC,
+ ALTIVEC_PIM_VEC_ANY_EQ,
+ ALTIVEC_PIM_VEC_ANY_EQ_2,
+ ALTIVEC_PIM_VEC_ANY_EQ_3,
+ ALTIVEC_PIM_VEC_ANY_EQ_4,
+ ALTIVEC_PIM_VEC_ANY_GE,
+ ALTIVEC_PIM_VEC_ANY_GE_2,
+ ALTIVEC_PIM_VEC_ANY_GE_3,
+ ALTIVEC_PIM_VEC_ANY_GE_4,
+ ALTIVEC_PIM_VEC_ANY_GE_5,
+ ALTIVEC_PIM_VEC_ANY_GE_6,
+ ALTIVEC_PIM_VEC_ANY_GE_7,
+ ALTIVEC_PIM_VEC_ANY_GT,
+ ALTIVEC_PIM_VEC_ANY_GT_2,
+ ALTIVEC_PIM_VEC_ANY_GT_3,
+ ALTIVEC_PIM_VEC_ANY_GT_4,
+ ALTIVEC_PIM_VEC_ANY_GT_5,
+ ALTIVEC_PIM_VEC_ANY_GT_6,
+ ALTIVEC_PIM_VEC_ANY_GT_7,
+ ALTIVEC_PIM_VEC_ANY_LE,
+ ALTIVEC_PIM_VEC_ANY_LE_2,
+ ALTIVEC_PIM_VEC_ANY_LE_3,
+ ALTIVEC_PIM_VEC_ANY_LE_4,
+ ALTIVEC_PIM_VEC_ANY_LE_5,
+ ALTIVEC_PIM_VEC_ANY_LE_6,
+ ALTIVEC_PIM_VEC_ANY_LE_7,
+ ALTIVEC_PIM_VEC_ANY_LT,
+ ALTIVEC_PIM_VEC_ANY_LT_2,
+ ALTIVEC_PIM_VEC_ANY_LT_3,
+ ALTIVEC_PIM_VEC_ANY_LT_4,
+ ALTIVEC_PIM_VEC_ANY_LT_5,
+ ALTIVEC_PIM_VEC_ANY_LT_6,
+ ALTIVEC_PIM_VEC_ANY_LT_7,
+ ALTIVEC_PIM_VEC_ANY_NAN,
+ ALTIVEC_PIM_VEC_ANY_NE,
+ ALTIVEC_PIM_VEC_ANY_NE_2,
+ ALTIVEC_PIM_VEC_ANY_NE_3,
+ ALTIVEC_PIM_VEC_ANY_NE_4,
+ ALTIVEC_PIM_VEC_ANY_NGE,
+ ALTIVEC_PIM_VEC_ANY_NGT,
+ ALTIVEC_PIM_VEC_ANY_NLE,
+ ALTIVEC_PIM_VEC_ANY_NLT,
+ ALTIVEC_PIM_VEC_ANY_NUMERIC,
+ ALTIVEC_PIM_VEC_ANY_OUT,
+
+ ALTIVEC_PIM__LAST = ALTIVEC_PIM_VEC_ANY_OUT,
+ /* APPLE LOCAL end AltiVec */
+
+ /* APPLE LOCAL begin constant cfstrings */
+ RS6000_BUILTIN_MAX,
+ TARGET_BUILTIN_MAX = RS6000_BUILTIN_MAX
+ /* APPLE LOCAL end constant cfstrings */
};
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index 3fdb165dd41..1ecd2f587c9 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -50,7 +50,7 @@
(UNSPEC_TLSGOTTPREL 28)
(UNSPEC_TLSTLS 29)
(UNSPEC_FIX_TRUNC_TF 30) ; fadd, rounding towards zero
- (UNSPEC_MV_CR_EQ 31) ; move_from_CR_eq_bit
+ (UNSPEC_MV_CR_GT 31) ; move_from_CR_gt_bit
])
;;
@@ -4622,8 +4622,8 @@
(minus:SF (mult:SF (neg:SF (match_operand:SF 1 "gpc_reg_operand" "f"))
(match_operand:SF 2 "gpc_reg_operand" "f"))
(match_operand:SF 3 "gpc_reg_operand" "f")))]
- "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
- && ! HONOR_SIGNED_ZEROS (SFmode)"
+;; APPLE LOCAL do this even if honoring signed zeros
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
"fnmadds %0,%1,%2,%3"
[(set_attr "type" "fp")])
@@ -4641,8 +4641,8 @@
(minus:SF (mult:SF (neg:SF (match_operand:SF 1 "gpc_reg_operand" "f"))
(match_operand:SF 2 "gpc_reg_operand" "f"))
(match_operand:SF 3 "gpc_reg_operand" "f")))]
- "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
- && ! HONOR_SIGNED_ZEROS (SFmode)"
+;; APPLE LOCAL do this even if honoring signed zeros
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
"{fnma|fnmadd} %0,%1,%2,%3"
[(set_attr "type" "dmul")])
@@ -4661,8 +4661,8 @@
(minus:SF (match_operand:SF 3 "gpc_reg_operand" "f")
(mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
(match_operand:SF 2 "gpc_reg_operand" "f"))))]
- "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
- && ! HONOR_SIGNED_ZEROS (SFmode)"
+;; APPLE LOCAL do this even if honoring signed zeros
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
"fnmsubs %0,%1,%2,%3"
[(set_attr "type" "fp")])
@@ -4680,8 +4680,8 @@
(minus:SF (match_operand:SF 3 "gpc_reg_operand" "f")
(mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
(match_operand:SF 2 "gpc_reg_operand" "f"))))]
- "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
- && ! HONOR_SIGNED_ZEROS (SFmode)"
+;; APPLE LOCAL do this even if honoring signed zeros
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
"{fnms|fnmsub} %0,%1,%2,%3"
[(set_attr "type" "fp")])
@@ -4984,8 +4984,8 @@
(minus:DF (mult:DF (neg:DF (match_operand:DF 1 "gpc_reg_operand" "f"))
(match_operand:DF 2 "gpc_reg_operand" "f"))
(match_operand:DF 3 "gpc_reg_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
- && ! HONOR_SIGNED_ZEROS (DFmode)"
+;; APPLE LOCAL do this even if honoring signed zeros
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
"{fnma|fnmadd} %0,%1,%2,%3"
[(set_attr "type" "dmul")])
@@ -5004,8 +5004,8 @@
(minus:DF (match_operand:DF 3 "gpc_reg_operand" "f")
(mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
(match_operand:DF 2 "gpc_reg_operand" "f"))))]
- "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
- && ! HONOR_SIGNED_ZEROS (DFmode)"
+;; APPLE LOCAL do this even if honoring signed zeros
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
"{fnms|fnmsub} %0,%1,%2,%3"
[(set_attr "type" "dmul")])
@@ -5121,7 +5121,12 @@
}
if (TARGET_POWERPC64)
{
- rtx mem = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+ /* APPLE LOCAL begin assign_stack_local_with_alias scheduling speedup */
+ /* assign_stack_local_with_alias is used instead of assign_stack_temp
+ * to get better scheduling, at the cost of some stack space. */
+ rtx mem = assign_stack_local_with_alias (DImode, GET_MODE_SIZE (DImode),
+ GET_MODE_ALIGNMENT (DImode));
+ /* APPLE LOCAL end assign_stack_local_with_alias scheduling speedup */
rtx t1 = gen_reg_rtx (DImode);
rtx t2 = gen_reg_rtx (DImode);
emit_insn (gen_floatsidf_ppc64 (operands[0], operands[1], mem, t1, t2));
@@ -5130,7 +5135,12 @@
operands[2] = force_reg (SImode, GEN_INT (0x43300000));
operands[3] = force_reg (DFmode, CONST_DOUBLE_ATOF (\"4503601774854144\", DFmode));
- operands[4] = assign_stack_temp (DFmode, GET_MODE_SIZE (DFmode), 0);
+ /* APPLE LOCAL begin assign_stack_local_with_alias scheduling speedup */
+ /* assign_stack_local_with_alias is used instead of assign_stack_temp to get
+ * better scheduling, at the cost of some stack space. */
+ operands[4] = assign_stack_local_with_alias (DFmode, GET_MODE_SIZE (DFmode),
+ GET_MODE_ALIGNMENT (DFmode));
+ /* APPLE LOCAL end assign_stack_local_with_alias scheduling speedup */
operands[5] = gen_reg_rtx (DFmode);
operands[6] = gen_reg_rtx (SImode);
}")
@@ -5208,7 +5218,12 @@
}
if (TARGET_POWERPC64)
{
- rtx mem = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+ /* APPLE LOCAL begin assign_stack_local_with_alias scheduling speedup */
+ /* assign_stack_local_with_alias is used instead of assign_stack_temp
+ * to get better scheduling, at the cost of some stack space. */
+ rtx mem = assign_stack_local_with_alias (DImode, GET_MODE_SIZE (DImode),
+ GET_MODE_ALIGNMENT (DImode));
+ /* APPLE LOCAL end assign_stack_local_with_alias scheduling speedup */
rtx t1 = gen_reg_rtx (DImode);
rtx t2 = gen_reg_rtx (DImode);
emit_insn (gen_floatunssidf_ppc64 (operands[0], operands[1], mem,
@@ -5218,7 +5233,12 @@
operands[2] = force_reg (SImode, GEN_INT (0x43300000));
operands[3] = force_reg (DFmode, CONST_DOUBLE_ATOF (\"4503599627370496\", DFmode));
- operands[4] = assign_stack_temp (DFmode, GET_MODE_SIZE (DFmode), 0);
+ /* APPLE LOCAL begin assign_stack_local_with_alias scheduling speedup */
+ /* assign_stack_local_with_alias is used instead of assign_stack_temp
+ * to get better scheduling, at the cost of some stack space. */
+ operands[4] = assign_stack_local_with_alias (DFmode, GET_MODE_SIZE (DFmode),
+ GET_MODE_ALIGNMENT (DFmode));
+ /* APPLE LOCAL end assign_stack_local_with_alias scheduling speedup */
operands[5] = gen_reg_rtx (DFmode);
}")
@@ -5282,7 +5302,12 @@
DONE;
}
operands[2] = gen_reg_rtx (DImode);
- operands[3] = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+ /* APPLE LOCAL begin assign_stack_local_with_alias scheduling speedup */
+ /* assign_stack_local_with_alias is used instead of assign_stack_temp
+ * to get better scheduling, at the cost of some stack space. */
+ operands[3] = assign_stack_local_with_alias (DImode, GET_MODE_SIZE (DImode),
+ GET_MODE_ALIGNMENT (DImode));
+ /* APPLE LOCAL end assign_stack_local_with_alias scheduling speedup */
}")
(define_insn "*fix_truncdfsi2_internal"
@@ -8326,11 +8351,11 @@
})
(define_expand "trunctfdf2"
- [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
- (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "f")))]
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "")))]
"(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
&& TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
-"")
+ "")
(define_insn_and_split "trunctfdf2_internal1"
[(set (match_operand:DF 0 "gpc_reg_operand" "=f,?f")
@@ -10140,11 +10165,10 @@
(define_insn "load_toc_v4_PIC_1b"
[(set (match_operand:SI 0 "register_operand" "=l")
- (match_operand:SI 1 "immediate_operand" "s"))
- (use (unspec [(match_dup 1) (match_operand 2 "immediate_operand" "s")]
+ (unspec:SI [(match_operand:SI 1 "immediate_operand" "s")]
UNSPEC_TOCPTR))]
"TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2"
- "bcl 20,31,%1+4\\n%1:\\n\\t.long %2-%1"
+ "bcl 20,31,$+8\\n\\t.long %1-$"
[(set_attr "type" "branch")
(set_attr "length" "8")])
@@ -10406,6 +10430,13 @@
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn (\"creqv 6,6,6\", operands);
+/* APPLE LOCAL begin -mlongcall */
+#ifdef RS6000_LONG_BRANCH
+ if (!flag_pic)
+ return output_call (insn, operands, 0, 0);
+ else
+#endif
+/* APPLE LOCAL end -mlongcall */
return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z0@local\" : \"bl %z0\";
}"
[(set_attr "type" "branch")
@@ -10588,11 +10619,12 @@
;; operands[2] is the value FUNCTION_ARG returns for the VOID argument
;; which indicates how to set cr1
+;; APPLE LOCAL begin separate cl into c,*l; switch and attr's expanded to match
(define_insn "*call_indirect_nonlocal_sysv"
- [(call (mem:SI (match_operand:SI 0 "register_operand" "cl,cl"))
- (match_operand 1 "" "g,g"))
- (use (match_operand:SI 2 "immediate_operand" "O,n"))
- (clobber (match_scratch:SI 3 "=l,l"))]
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "c,*l,c,*l"))
+ (match_operand 1 "" "g,g,g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,O,n,n"))
+ (clobber (match_scratch:SI 3 "=l,l,l,l"))]
"DEFAULT_ABI == ABI_V4
|| DEFAULT_ABI == ABI_DARWIN"
{
@@ -10604,8 +10636,9 @@
return "b%T0l";
}
- [(set_attr "type" "jmpreg,jmpreg")
- (set_attr "length" "4,8")])
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
+;; APPLE LOCAL end separate cl into c,*l; switch and attr's expanded to match
(define_insn "*call_nonlocal_sysv"
[(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "s,s"))
@@ -10622,6 +10655,21 @@
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
+ /* APPLE LOCAL begin ObjC direct dispatch. */
+ /* Generate 'bla' instruction for functions with hard-coded addresses. */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ rtx note = find_reg_note (insn, REG_ABSCALL, NULL_RTX);
+ static char buf[256];
+ if (note)
+ {
+ sprintf (buf, \"bla \" HOST_WIDE_INT_PRINT_HEX,
+ INTVAL (XEXP (note, 0)));
+ return buf;
+ }
+ }
+ /* APPLE LOCAL end ObjC direct dispatch. */
+
#if TARGET_MACHO
return output_call(insn, operands, 0, 2);
#else
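The REG_ABSCALL note carries a callee's hard-coded absolute address, and bla (branch absolute and link) calls it directly instead of going through a PC-relative bl. A simplified, hypothetical rendering of the emission logic above:

#include <stdio.h>

/* Sketch only: format a branch-absolute-and-link to a fixed address,
   much as the pattern does via HOST_WIDE_INT_PRINT_HEX.  */
static const char *
format_abs_call (char *buf, size_t size, unsigned long long addr)
{
  snprintf (buf, size, "bla 0x%llx", addr);
  return buf;
}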
@@ -10631,12 +10679,13 @@
[(set_attr "type" "branch,branch")
(set_attr "length" "4,8")])
+;; APPLE LOCAL begin separate cl into c,*l; switch and attr's expanded to match
(define_insn "*call_value_indirect_nonlocal_sysv"
[(set (match_operand 0 "" "")
- (call (mem:SI (match_operand:SI 1 "register_operand" "cl,cl"))
- (match_operand 2 "" "g,g")))
- (use (match_operand:SI 3 "immediate_operand" "O,n"))
- (clobber (match_scratch:SI 4 "=l,l"))]
+ (call (mem:SI (match_operand:SI 1 "register_operand" "c,*l,c,*l"))
+ (match_operand 2 "" "g,g,g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,O,n,n"))
+ (clobber (match_scratch:SI 4 "=l,l,l,l"))]
"DEFAULT_ABI == ABI_V4
|| DEFAULT_ABI == ABI_DARWIN"
{
@@ -10648,8 +10697,9 @@
return "b%T1l";
}
- [(set_attr "type" "jmpreg,jmpreg")
- (set_attr "length" "4,8")])
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
+;; APPLE LOCAL end separate cl into c,*l; switch and attr's expanded to match
(define_insn "*call_value_nonlocal_sysv"
[(set (match_operand 0 "" "")
@@ -10667,6 +10717,21 @@
else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
+ /* APPLE LOCAL begin ObjC direct dispatch. */
+ /* Generate 'bla' instruction for functions with hard-coded addresses. */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ rtx note = find_reg_note (insn, REG_ABSCALL, NULL_RTX);
+ static char buf[256];
+ if (note)
+ {
+ sprintf (buf, \"bla \" HOST_WIDE_INT_PRINT_HEX,
+ INTVAL (XEXP (note, 0)));
+ return buf;
+ }
+ }
+ /* APPLE LOCAL end ObjC direct dispatch. */
+
#if TARGET_MACHO
return output_call(insn, operands, 1, 3);
#else
@@ -10704,6 +10769,86 @@
DONE;
}")
+;; APPLE LOCAL begin sibcall patterns
+;; APPLE MERGE modify FSF patterns below instead?
+;; This and similar patterns must be marked as using LR; otherwise
+;; dataflow will try to delete the store into it.  That holds even
+;; when the register actually jumped through is CTR, because LR was
+;; saved and restored around the PIC-setting BCL.
+(define_insn "*sibcall_symbolic"
+ [(call (mem:SI (match_operand:SI 0 "call_operand" "s,c"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand:SI 3 "register_operand" "l,l"))
+ (return)]
+ "! TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ /* APPLE LOCAL begin ObjC direct dispatch */
+ /* Generate 'ba' instruction for functions with hard-coded addresses. */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ rtx note = find_reg_note (insn, REG_ABSCALL, NULL_RTX);
+ static char buf[256];
+ if (note)
+ {
+ if (which_alternative != 0)
+ abort ();
+ sprintf (buf, \"ba \" HOST_WIDE_INT_PRINT_HEX,
+ INTVAL (XEXP (note, 0)));
+ return buf;
+ }
+ }
+ /* APPLE LOCAL end ObjC direct dispatch */
+
+ switch (which_alternative)
+ {
+ case 0: return \"b %z0\";
+ case 1: return \"b%T0\";
+ default: abort();
+ }
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_value_symbolic"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "call_operand" "s,c"))
+ (match_operand 2 "" "")))
+ (use (match_operand:SI 3 "" ""))
+ (use (match_operand:SI 4 "register_operand" "l,l"))
+ (return)]
+ "! TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ /* APPLE LOCAL begin ObjC direct dispatch */
+ /* Generate 'ba' instruction for functions with hard-coded addresses. */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ rtx note = find_reg_note (insn, REG_ABSCALL, NULL_RTX);
+ static char buf[256];
+ if (note)
+ {
+ if (which_alternative != 0)
+ abort ();
+ sprintf (buf, \"ba \" HOST_WIDE_INT_PRINT_HEX,
+ INTVAL (XEXP (note, 0)));
+ return buf;
+ }
+ }
+ /* APPLE LOCAL end ObjC direct dispatch */
+
+ switch (which_alternative)
+ {
+ case 0: return \"b %z1\";
+ case 1: return \"b%T1\";
+ default: abort();
+ }
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+;; APPLE LOCAL end sibcall patterns
+
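The point of the (use ... "l") operand is easiest to see from what a sibling call compiles to: with -foptimize-sibling-calls, a call in tail position can become a plain b, so the return address already sitting in LR must stay live across the jump. For example:

extern int callee (int);

/* Can compile to a single `b' on Darwin/PPC: callee returns straight
   to tail_caller's caller through the untouched LR.  */
int
tail_caller (int x)
{
  return callee (x + 1);
}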
;; sibling call patterns
(define_expand "sibcall"
[(parallel [(call (mem:SI (match_operand 0 "address_operand" ""))
@@ -11473,11 +11618,11 @@
(set_attr "length" "8")])
;; Same as above, but get the GT bit.
-(define_insn "move_from_CR_eq_bit"
+(define_insn "move_from_CR_gt_bit"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (unspec:SI [(match_operand 1 "cc_reg_operand" "y")] UNSPEC_MV_CR_EQ))]
+ (unspec:SI [(match_operand 1 "cc_reg_operand" "y")] UNSPEC_MV_CR_GT))]
"TARGET_E500"
- "mfcr %0\;{rlinm|rlwinm} %0,%0,%D1,1"
+ "mfcr %0\;{rlinm|rlwinm} %0,%0,%D1,31,31"
[(set_attr "type" "mfcr")
(set_attr "length" "8")])
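The fixed template supplies both mask bounds: rlwinm rD,rS,SH,MB,ME rotates left by SH and keeps bits MB..ME in IBM numbering (bit 0 is the MSB), so 31,31 isolates the rotated GT bit in the least significant position. A hedged C model of that semantics:

#include <stdint.h>

/* Model of rlwinm for 0 < sh < 32 and mb <= me, IBM bit numbering. */
static uint32_t
rlwinm_model (uint32_t rs, unsigned sh, unsigned mb, unsigned me)
{
  uint32_t rot = (rs << sh) | (rs >> (32 - sh));
  uint32_t mask = (0xFFFFFFFFu >> mb) & (0xFFFFFFFFu << (31 - me));
  return rot & mask;
}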
@@ -12670,25 +12815,25 @@
"")
(define_insn_and_split ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,r")
(plus:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_neg_short_operand" "r,P"))
(match_operand:SI 3 "reg_or_short_operand" "rI,rI")))]
"TARGET_32BIT"
"#"
- "TARGET_32BIT"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
[(set (match_dup 0) (neg:SI (ltu:SI (match_dup 1) (match_dup 2))))
(set (match_dup 0) (minus:SI (match_dup 3) (match_dup 0)))]
"")
(define_insn_and_split ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
(plus:DI (ltu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
(match_operand:DI 2 "reg_or_neg_short_operand" "r,P"))
(match_operand:DI 3 "reg_or_short_operand" "rI,rI")))]
"TARGET_64BIT"
"#"
- "TARGET_64BIT"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
[(set (match_dup 0) (neg:DI (ltu:DI (match_dup 1) (match_dup 2))))
(set (match_dup 0) (minus:DI (match_dup 3) (match_dup 0)))]
"")
@@ -13628,25 +13773,25 @@
"")
(define_insn_and_split ""
- [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
(plus:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_short_operand" "rI"))
(match_operand:SI 3 "reg_or_short_operand" "rI")))]
"TARGET_32BIT"
"#"
- "TARGET_32BIT"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
[(set (match_dup 0) (neg:SI (gtu:SI (match_dup 1) (match_dup 2))))
(set (match_dup 0) (minus:SI (match_dup 3) (match_dup 0)))]
"")
(define_insn_and_split ""
- [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
(plus:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r")
(match_operand:DI 2 "reg_or_short_operand" "rI"))
(match_operand:DI 3 "reg_or_short_operand" "rI")))]
"TARGET_64BIT"
"#"
- "TARGET_64BIT"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
[(set (match_dup 0) (neg:DI (gtu:DI (match_dup 1) (match_dup 2))))
(set (match_dup 0) (minus:DI (match_dup 3) (match_dup 0)))]
"")
@@ -14554,6 +14699,35 @@
[(set_attr "type" "branch")
(set_attr "length" "4")])
+/* APPLE LOCAL begin unnamed */
+(define_insn "*save_fpregs_with_label_si"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:SI 1 "register_operand" "=l"))
+ (use (match_operand:SI 2 "call_operand" "s"))
+ (use (match_operand:SI 3 "" ""))
+ (set (match_operand:DF 4 "memory_operand" "=m")
+ (match_operand:DF 5 "gpc_reg_operand" "f"))])]
+ "TARGET_32BIT"
+ "*
+#if TARGET_MACHO
+ const char *picbase = machopic_function_base_name ();
+ char *tmp;
+ operands[3] = gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (picbase, -1));
+ if (TARGET_LONG_BRANCH)
+ {
+ tmp = ggc_alloc (strlen (XSTR (operands[2], 0)) + strlen (XSTR (operands[3], 0)) + 2);
+ strcpy (tmp, output_call(insn, operands, 2, 2));
+ strcat (tmp, \"\\n%3:\");
+ return tmp;
+ }
+ else
+#endif
+ return \"bl %z2\\n%3:\";
+"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+/* APPLE LOCAL end unnamed */
+
(define_insn "*save_fpregs_di"
[(match_parallel 0 "any_parallel_operand"
[(clobber (match_operand:DI 1 "register_operand" "=l"))
@@ -14661,7 +14835,16 @@
(set (match_operand:DF 3 "gpc_reg_operand" "=f")
(match_operand:DF 4 "memory_operand" "m"))])]
"TARGET_32BIT"
- "b %z2")
+; APPLE LOCAL begin -mlongcall
+ {
+#if TARGET_MACHO
+ if (TARGET_LONG_BRANCH)
+ return output_call(insn, operands, 2, 2);
+ else
+#endif
+ return "b %z2";
+ })
+; APPLE LOCAL end -mlongcall
(define_insn "*return_and_restore_fpregs_di"
[(match_parallel 0 "any_parallel_operand"
@@ -14671,7 +14854,16 @@
(set (match_operand:DF 3 "gpc_reg_operand" "=f")
(match_operand:DF 4 "memory_operand" "m"))])]
"TARGET_64BIT"
- "b %z2")
+; APPLE LOCAL begin -mlongcall
+ {
+#if TARGET_MACHO
+ if (TARGET_LONG_BRANCH)
+ return output_call(insn, operands, 2, 2);
+ else
+#endif
+ return "b %z2";
+ })
+; APPLE LOCAL end -mlongcall
; This is used in compiling the unwind routines.
(define_expand "eh_return"
diff --git a/gcc/config/rs6000/spe.md b/gcc/config/rs6000/spe.md
index b0459829372..79d03ff623f 100644
--- a/gcc/config/rs6000/spe.md
+++ b/gcc/config/rs6000/spe.md
@@ -29,6 +29,7 @@
(TSTDFGT_GPR 1009)
(CMPDFLT_GPR 1010)
(TSTDFLT_GPR 1011)
+ (E500_CR_IOR_COMPARE 1012)
])
(define_insn "*negsf2_gpr"
@@ -2615,14 +2616,14 @@
;; FP comparison stuff.
;; Flip the GT bit.
-(define_insn "e500_flip_eq_bit"
+(define_insn "e500_flip_gt_bit"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
(unspec:CCFP
[(match_operand:CCFP 1 "cc_reg_operand" "y")] 999))]
"!TARGET_FPRS && TARGET_HARD_FLOAT"
"*
{
- return output_e500_flip_eq_bit (operands[0], operands[1]);
+ return output_e500_flip_gt_bit (operands[0], operands[1]);
}"
[(set_attr "type" "cr_logical")])
@@ -2751,3 +2752,13 @@
"TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && flag_unsafe_math_optimizations"
"efdtstlt %0,%1,%2"
[(set_attr "type" "veccmpsimple")])
+
+;; Like cceq_ior_compare, but compare the GT bits.
+(define_insn "e500_cr_ior_compare"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP [(match_operand 1 "cc_reg_operand" "y")
+ (match_operand 2 "cc_reg_operand" "y")]
+ E500_CR_IOR_COMPARE))]
+ "TARGET_E500"
+ "cror 4*%0+gt,4*%1+gt,4*%2+gt"
+ [(set_attr "type" "cr_logical")])
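The 4*%0+gt operand arithmetic follows from the condition register layout: each CR field is four bits wide (lt, gt, eq, so), so the GT bit of field n is CR bit 4n+1. A one-line model:

/* CR field n occupies bits 4n..4n+3; lt = 0, gt = 1, eq = 2, so = 3. */
static unsigned
cr_gt_bit (unsigned field)
{
  return 4 * field + 1;
}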
diff --git a/gcc/config/rs6000/sysv4.h b/gcc/config/rs6000/sysv4.h
index db9abeae735..30a11c533e5 100644
--- a/gcc/config/rs6000/sysv4.h
+++ b/gcc/config/rs6000/sysv4.h
@@ -438,6 +438,11 @@ do { \
#define BSS_SECTION_ASM_OP "\t.section\t\".bss\""
+/* APPLE LOCAL begin hot/cold partitioning */
+#define HOT_TEXT_SECTION_NAME ".text"
+#define UNLIKELY_EXECUTED_TEXT_SECTION_NAME ".text.unlikely"
+/* APPLE LOCAL end hot/cold partitioning */
+
/* Override elfos.h definition. */
#undef INIT_SECTION_ASM_OP
#define INIT_SECTION_ASM_OP "\t.section\t\".init\",\"ax\""
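With hot/cold partitioning, blocks the compiler believes are rarely executed (from profile data or __builtin_expect) are emitted into UNLIKELY_EXECUTED_TEXT_SECTION_NAME while the rest of the function stays in the hot section. For instance:

extern void report_divide_by_zero (void);

int
checked_div (int a, int b)
{
  if (__builtin_expect (b == 0, 0))
    {
      /* Cold path: eligible for placement in .text.unlikely.  */
      report_divide_by_zero ();
      return 0;
    }
  return a / b;  /* Hot path: stays in .text.  */
}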
diff --git a/gcc/config/rs6000/t-aix43 b/gcc/config/rs6000/t-aix43
index 4c03269bc83..b163ef8af2c 100644
--- a/gcc/config/rs6000/t-aix43
+++ b/gcc/config/rs6000/t-aix43
@@ -46,19 +46,24 @@ SHLIB_EXT = .a
SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
-Wl,-bE:@shlib_map_file@ -o @multilib_dir@/shr.o \
@multilib_flags@ @shlib_objs@ -lc \
- `case @shlib_base_name@ in \
+ `case @multilib_dir@ in \
*pthread*) echo -L/usr/lib/threads -lpthreads -lc_r /usr/lib/libc.a ;; \
*) echo -lc ;; esac` ; \
- rm -f tmp-@shlib_base_name@.a ; \
- $(AR_CREATE_FOR_TARGET) tmp-@shlib_base_name@.a @multilib_dir@/shr.o ; \
- mv tmp-@shlib_base_name@.a @shlib_base_name@.a ; \
+ rm -f @multilib_dir@/tmp-@shlib_base_name@.a ; \
+ $(AR_CREATE_FOR_TARGET) @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/shr.o ; \
+ mv @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/@shlib_base_name@.a ; \
rm -f @multilib_dir@/shr.o
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
-SHLIB_INSTALL = $(INSTALL_DATA) @shlib_base_name@.a $$(DESTDIR)$$(slibdir)/
-SHLIB_LIBS = -lc `case @shlib_base_name@ in *pthread*) echo -lpthread ;; esac`
+SHLIB_INSTALL = \
+ $$(mkinstalldirs) $$(DESTDIR)$$(slibdir)@shlib_slibdir_qual@; \
+ $(INSTALL_DATA) @multilib_dir@/@shlib_base_name@.a \
+ $$(DESTDIR)$$(slibdir)@shlib_slibdir_qual@/
+SHLIB_LIBS = -lc `case @multilib_dir@ in *pthread*) echo -lpthread ;; esac`
SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
-SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver
+SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver $(srcdir)/config/rs6000/libgcc-ppc64.ver
SHLIB_NM_FLAGS = -Bpg -X32_64
# GCC 128-bit long double support routines.
diff --git a/gcc/config/rs6000/t-aix52 b/gcc/config/rs6000/t-aix52
index 10e26401475..6689eba56ab 100644
--- a/gcc/config/rs6000/t-aix52
+++ b/gcc/config/rs6000/t-aix52
@@ -27,19 +27,24 @@ SHLIB_EXT = .a
SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
-Wl,-bE:@shlib_map_file@ -o @multilib_dir@/shr.o \
@multilib_flags@ @shlib_objs@ -lc \
- `case @shlib_base_name@ in \
+ `case @multilib_dir@ in \
*pthread*) echo -L/usr/lib/threads -lpthreads -lc_r /usr/lib/libc.a ;; \
*) echo -lc ;; esac` ; \
- rm -f tmp-@shlib_base_name@.a ; \
- $(AR_CREATE_FOR_TARGET) tmp-@shlib_base_name@.a @multilib_dir@/shr.o ; \
- mv tmp-@shlib_base_name@.a @shlib_base_name@.a ; \
+ rm -f @multilib_dir@/tmp-@shlib_base_name@.a ; \
+ $(AR_CREATE_FOR_TARGET) @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/shr.o ; \
+ mv @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/@shlib_base_name@.a ; \
rm -f @multilib_dir@/shr.o
# $(slibdir) double quoted to protect it from expansion while building
# libgcc.mk. We want this delayed until actual install time.
-SHLIB_INSTALL = $(INSTALL_DATA) @shlib_base_name@.a $$(DESTDIR)$$(slibdir)/
-SHLIB_LIBS = -lc `case @shlib_base_name@ in *pthread*) echo -lpthread ;; esac`
+SHLIB_INSTALL = \
+ $$(mkinstalldirs) $$(DESTDIR)$$(slibdir)@shlib_slibdir_qual@; \
+ $(INSTALL_DATA) @multilib_dir@/@shlib_base_name@.a \
+ $$(DESTDIR)$$(slibdir)@shlib_slibdir_qual@/
+SHLIB_LIBS = -lc `case @multilib_dir@ in *pthread*) echo -lpthread ;; esac`
SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
-SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver
+SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver $(srcdir)/config/rs6000/libgcc-ppc64.ver
SHLIB_NM_FLAGS = -Bpg -X32_64
# GCC 128-bit long double support routines.
diff --git a/gcc/config/rs6000/t-darwin b/gcc/config/rs6000/t-darwin
index 467c426f976..31f3f691dc4 100644
--- a/gcc/config/rs6000/t-darwin
+++ b/gcc/config/rs6000/t-darwin
@@ -1,15 +1,22 @@
+# APPLE LOCAL begin fpsave.asm moved from _STATIC_EXTRA to _EXTRA --dbj
LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-tramp.asm \
+ $(srcdir)/config/rs6000/darwin-fpsave.asm \
$(srcdir)/config/rs6000/darwin-ldouble.c
LIB2FUNCS_STATIC_EXTRA = \
- $(srcdir)/config/rs6000/darwin-fpsave.asm \
$(srcdir)/config/rs6000/darwin-vecsave.asm \
$(srcdir)/config/rs6000/darwin-world.asm
+# APPLE LOCAL end fpsave.asm moved from _STATIC_EXTRA to _EXTRA --dbj
# The .asm files above are designed to run on all processors,
# even though they use AltiVec instructions. -Wa is used because
# -force_cpusubtype_ALL doesn't work with -dynamiclib.
-TARGET_LIBGCC2_CFLAGS = -Wa,-force_cpusubtype_ALL
+#
+# -pipe is used because of an assembler bug, 4077127, which makes the
+# assembler mishandle the first # directive, so temporary file names
+# leak into the stabs debug info and the bootstrap fails.  Using -pipe
+# works around this by avoiding temporary files altogether.
+TARGET_LIBGCC2_CFLAGS = -Wa,-force_cpusubtype_ALL -pipe
# Export the _xlq* symbols from darwin-ldouble.c.
SHLIB_MAPFILES += $(srcdir)/config/rs6000/libgcc-ppc64.ver
diff --git a/gcc/config/rs6000/t-darwin8 b/gcc/config/rs6000/t-darwin8
new file mode 100644
index 00000000000..413ee9b13e3
--- /dev/null
+++ b/gcc/config/rs6000/t-darwin8
@@ -0,0 +1,4 @@
+# APPLE LOCAL file 64-bit
+# 64-bit libraries can only be built in Darwin 8.x or later.
+MULTILIB_OPTIONS = m64
+MULTILIB_DIRNAMES = ppc64
diff --git a/gcc/config/rs6000/t-rs6000 b/gcc/config/rs6000/t-rs6000
index caa07153ad3..7025b80a977 100644
--- a/gcc/config/rs6000/t-rs6000
+++ b/gcc/config/rs6000/t-rs6000
@@ -10,7 +10,8 @@ rs6000.o: $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(TM_P_H) $(TARGET_H) $(TARGET_DEF_H) langhooks.h reload.h gt-rs6000.h \
cfglayout.h
-rs6000-c.o: $(srcdir)/config/rs6000/rs6000-c.c \
+# APPLE LOCAL AltiVec
+rs6000-c.o: $(srcdir)/config/rs6000/rs6000-c.c options.h \
$(srcdir)/config/rs6000/rs6000-protos.h \
$(CONFIG_H) $(SYSTEM_H) $(TREE_H) $(CPPLIB_H) \
$(TM_P_H) c-pragma.h errors.h coretypes.h $(TM_H)
diff --git a/gcc/config/rs6000/t-rtems b/gcc/config/rs6000/t-rtems
index 11a73b62fcb..3213c82f5c7 100644
--- a/gcc/config/rs6000/t-rtems
+++ b/gcc/config/rs6000/t-rtems
@@ -1,25 +1,38 @@
# Multilibs for powerpc RTEMS targets.
MULTILIB_OPTIONS = \
-mcpu=403/mcpu=505/mcpu=601/mcpu=603/mcpu=603e/mcpu=604/mcpu=750/mcpu=821/mcpu=860 \
-Dmpc509/Dmpc8260 \
D_OLD_EXCEPTIONS \
+mcpu=403/mcpu=505/mcpu=601/mcpu=603e/mcpu=604/mcpu=860/mcpu=7400 \
+Dmpc8260 \
msoft-float
MULTILIB_DIRNAMES = \
-m403 m505 m601 m603 m603e m604 m750 m821 m860 \
-mpc509 \
-mpc8260 \
roe \
+m403 m505 m601 m603e m604 m860 m7400 \
+mpc8260 \
nof
MULTILIB_EXTRA_OPTS = mrelocatable-lib mno-eabi mstrict-align
-MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT}
+# MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT}
+MULTILIB_MATCHES =
MULTILIB_MATCHES += ${MULTILIB_MATCHES_ENDIAN}
MULTILIB_MATCHES += ${MULTILIB_MATCHES_SYSV}
-MULTILIB_MATCHES += mcpu?505/Dmpc505=mcpu?505/Dmpc509
-MULTILIB_MATCHES += mcpu?603=mcpu?602
+# Map 405 to 403
+MULTILIB_MATCHES += mcpu?403=mcpu?405
+# Map 602, 603e, 603 to 603e
+MULTILIB_MATCHES += mcpu?603e=mcpu?602
+MULTILIB_MATCHES += mcpu?603e=mcpu?603
+# Map 801, 821, 823 to 860
+MULTILIB_MATCHES += mcpu?860=mcpu?801
+MULTILIB_MATCHES += mcpu?860=mcpu?821
+MULTILIB_MATCHES += mcpu?860=mcpu?823
+# Map 7450 to 7400
+MULTILIB_MATCHES += mcpu?7400=mcpu?7450
+
+# Map 750 to the default (no -mcpu option)
+MULTILIB_MATCHES += mcpu?750=
+
#
# RTEMS old/new-exceptions handling
@@ -34,32 +47,39 @@ MULTILIB_MATCHES += mcpu?603=mcpu?602
# Cpu-variants supporting new exception processing only
MULTILIB_NEW_EXCEPTIONS_ONLY = \
-*mcpu=505*/*D_OLD_EXCEPTIONS* \
-*mcpu=601*/*D_OLD_EXCEPTIONS* \
-*mcpu=602*/*D_OLD_EXCEPTIONS* \
-*mcpu=603/*D_OLD_EXCEPTIONS* \
-*mcpu=604*/*D_OLD_EXCEPTIONS* \
-*mcpu=750*/*D_OLD_EXCEPTIONS* \
-*mcpu=821*/*D_OLD_EXCEPTIONS* \
-*Dmpc8260*/*D_OLD_EXCEPTIONS* \
-*mcpu=860*/*D_OLD_EXCEPTIONS*
+D_OLD_EXCEPTIONS \
+D_OLD_EXCEPTIONS/msoft-float \
+D_OLD_EXCEPTIONS/mcpu=505 \
+D_OLD_EXCEPTIONS/mcpu=505/* \
+D_OLD_EXCEPTIONS/mcpu=601 \
+D_OLD_EXCEPTIONS/mcpu=601/* \
+D_OLD_EXCEPTIONS/mcpu=604 \
+D_OLD_EXCEPTIONS/mcpu=604/* \
+D_OLD_EXCEPTIONS/mcpu=750 \
+D_OLD_EXCEPTIONS/mcpu=750/* \
+D_OLD_EXCEPTIONS/mcpu=860 \
+D_OLD_EXCEPTIONS/mcpu=860/* \
+D_OLD_EXCEPTIONS/mcpu=7400 \
+D_OLD_EXCEPTIONS/mcpu=7400/* \
+D_OLD_EXCEPTIONS/*Dmpc*
# Soft-float only, default implies msoft-float
# NOTE: Must match with MULTILIB_MATCHES_FLOAT and MULTILIB_MATCHES
MULTILIB_SOFTFLOAT_ONLY = \
-mcpu=403/*msoft-float* \
-mcpu=821/*msoft-float* \
-mcpu=860/*msoft-float*
+*mcpu=401/*msoft-float* \
+*mcpu=403/*msoft-float* \
+*mcpu=405/*msoft-float* \
+*mcpu=801/*msoft-float* \
+*mcpu=821/*msoft-float* \
+*mcpu=823/*msoft-float* \
+*mcpu=860/*msoft-float*
# Hard-float only, take out msoft-float
MULTILIB_HARDFLOAT_ONLY = \
-mcpu=505/*msoft-float*
+*mcpu=505/*msoft-float*
MULTILIB_EXCEPTIONS =
-# Disallow -D_OLD_EXCEPTIONS without other options
-MULTILIB_EXCEPTIONS += D_OLD_EXCEPTIONS*
-
# Disallow -Dppc and -Dmpc without other options
MULTILIB_EXCEPTIONS += Dppc* Dmpc*
@@ -70,22 +90,10 @@ ${MULTILIB_HARDFLOAT_ONLY}
# Special rules
# Take out all variants we don't want
-MULTILIB_EXCEPTIONS += mcpu=403/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=403/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=505/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=505/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=601/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=601/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=602/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=602/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=603/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=603/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=603e/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=604/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=604/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=750/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=750/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=821/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=821/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=860/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=860/Dmpc8260*
+MULTILIB_EXCEPTIONS += *mcpu=403/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=505/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=601/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=604/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=750/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=860/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=7400/Dmpc*
diff --git a/gcc/config/rs6000/vec.h b/gcc/config/rs6000/vec.h
new file mode 100644
index 00000000000..56e8786f25b
--- /dev/null
+++ b/gcc/config/rs6000/vec.h
@@ -0,0 +1,4515 @@
+/* APPLE LOCAL file AltiVec */
+/* This file is generated by ops-to-gp. Do not edit. */
+
+/* To regenerate execute:
+ ops-to-gp -gcc vec.ops builtin.ops
+ with the current directory being gcc/config/rs6000. */
+
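Each entry below describes one overload of an AltiVec intrinsic. The struct definition itself is not part of this diff; the following is a hypothetical reconstruction of the field layout, inferred only from the initializers:

/* Inferred layout; the real `struct builtin' comes from the
   ops-to-gp support code, not from this file.  */
struct builtin_sketch
{
  const struct type *args[3];  /* up to three argument types          */
  const char *arg_match;       /* one 'x' per vector argument         */
  const struct type *result;   /* result type                         */
  int n_args;                  /* arity                               */
  int flag1, flag2;            /* the two FALSE fields                */
  int attr;                    /* small attribute code (0, 2, 11 ...) */
  const char *name;            /* "vec_abs:1", name plus overload     */
  const char *insn;            /* "*vadduhm" or an opcode selector    */
  int icode;                   /* CODE_FOR_... insn code              */
  int uid;                     /* B_UID (n)                           */
};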
+static const struct builtin B1_vec_abs = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 11, "vec_abs:1", "4", CODE_FOR_xfx_perm, B_UID(0) };
+static const struct builtin B2_vec_abs = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 11, "vec_abs:2", "2", CODE_FOR_xfx_perm, B_UID(1) };
+static const struct builtin B3_vec_abs = { { &T_vec_s32, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 11, "vec_abs:3", "3", CODE_FOR_xfx_perm, B_UID(2) };
+static const struct builtin B4_vec_abs = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s8, 1, FALSE, FALSE, 11, "vec_abs:4", "1", CODE_FOR_xfx_perm, B_UID(3) };
+static const struct builtin B1_vec_abss = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 11, "vec_abss:1", "6", CODE_FOR_xfx_perm, B_UID(4) };
+static const struct builtin B2_vec_abss = { { &T_vec_s32, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 11, "vec_abss:2", "7", CODE_FOR_xfx_perm, B_UID(5) };
+static const struct builtin B3_vec_abss = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s8, 1, FALSE, FALSE, 11, "vec_abss:3", "5", CODE_FOR_xfx_perm, B_UID(6) };
+static const struct builtin B1_vec_vadduhm = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vadduhm:1", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(7) };
+static const struct builtin B2_vec_vadduhm = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhm:2", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(8) };
+static const struct builtin B1_vec_vadduwm = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vadduwm:1", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(9) };
+static const struct builtin B2_vec_vadduwm = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduwm:2", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(10) };
+static const struct builtin B1_vec_vaddubm = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddubm:1", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(11) };
+static const struct builtin B2_vec_vaddubm = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubm:2", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(12) };
+static const struct builtin B_vec_vaddfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vaddfp", "*vaddfp", CODE_FOR_xfxx_fp, B_UID(13) };
+static const struct builtin B3_vec_vadduhm = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vadduhm:3", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(14) };
+static const struct builtin B4_vec_vadduhm = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vadduhm:4", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(15) };
+static const struct builtin B3_vec_vadduwm = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vadduwm:3", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(16) };
+static const struct builtin B4_vec_vadduwm = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vadduwm:4", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(17) };
+static const struct builtin B3_vec_vaddubm = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddubm:3", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(18) };
+static const struct builtin B4_vec_vaddubm = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddubm:4", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(19) };
+static const struct builtin B5_vec_vadduhm = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhm:5", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(20) };
+static const struct builtin B6_vec_vadduhm = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhm:6", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(21) };
+static const struct builtin B5_vec_vadduwm = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduwm:5", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(22) };
+static const struct builtin B6_vec_vadduwm = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduwm:6", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(23) };
+static const struct builtin B5_vec_vaddubm = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubm:5", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(24) };
+static const struct builtin B6_vec_vaddubm = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubm:6", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(25) };
+static const struct builtin B_vec_vaddcuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vaddcuw", "*vaddcuw", CODE_FOR_xfxx_simple, B_UID(26) };
+static const struct builtin B1_vec_vaddshs = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vaddshs:1", "*vaddshs", CODE_FOR_xfxx_simple, B_UID(27) };
+static const struct builtin B1_vec_vadduhs = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhs:1", "*vadduhs", CODE_FOR_xfxx_simple, B_UID(28) };
+static const struct builtin B1_vec_vaddsws = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vaddsws:1", "*vaddsws", CODE_FOR_xfxx_simple, B_UID(29) };
+static const struct builtin B1_vec_vadduws = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduws:1", "*vadduws", CODE_FOR_xfxx_simple, B_UID(30) };
+static const struct builtin B1_vec_vaddsbs = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddsbs:1", "*vaddsbs", CODE_FOR_xfxx_simple, B_UID(31) };
+static const struct builtin B1_vec_vaddubs = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubs:1", "*vaddubs", CODE_FOR_xfxx_simple, B_UID(32) };
+static const struct builtin B2_vec_vaddshs = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vaddshs:2", "*vaddshs", CODE_FOR_xfxx_simple, B_UID(33) };
+static const struct builtin B3_vec_vaddshs = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vaddshs:3", "*vaddshs", CODE_FOR_xfxx_simple, B_UID(34) };
+static const struct builtin B2_vec_vaddsws = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vaddsws:2", "*vaddsws", CODE_FOR_xfxx_simple, B_UID(35) };
+static const struct builtin B3_vec_vaddsws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vaddsws:3", "*vaddsws", CODE_FOR_xfxx_simple, B_UID(36) };
+static const struct builtin B2_vec_vaddsbs = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddsbs:2", "*vaddsbs", CODE_FOR_xfxx_simple, B_UID(37) };
+static const struct builtin B3_vec_vaddsbs = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddsbs:3", "*vaddsbs", CODE_FOR_xfxx_simple, B_UID(38) };
+static const struct builtin B2_vec_vadduhs = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhs:2", "*vadduhs", CODE_FOR_xfxx_simple, B_UID(39) };
+static const struct builtin B3_vec_vadduhs = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhs:3", "*vadduhs", CODE_FOR_xfxx_simple, B_UID(40) };
+static const struct builtin B2_vec_vadduws = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduws:2", "*vadduws", CODE_FOR_xfxx_simple, B_UID(41) };
+static const struct builtin B3_vec_vadduws = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduws:3", "*vadduws", CODE_FOR_xfxx_simple, B_UID(42) };
+static const struct builtin B2_vec_vaddubs = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubs:2", "*vaddubs", CODE_FOR_xfxx_simple, B_UID(43) };
+static const struct builtin B3_vec_vaddubs = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubs:3", "*vaddubs", CODE_FOR_xfxx_simple, B_UID(44) };
+static const struct builtin B1_vec_all_eq = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:1", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(45) };
+static const struct builtin B2_vec_all_eq = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:2", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(46) };
+static const struct builtin B3_vec_all_eq = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:3", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(47) };
+static const struct builtin B4_vec_all_eq = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:4", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(48) };
+static const struct builtin B5_vec_all_eq = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:5", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(49) };
+static const struct builtin B6_vec_all_eq = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:6", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(50) };
+static const struct builtin B7_vec_all_eq = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:7", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(51) };
+static const struct builtin B8_vec_all_eq = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:8", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(52) };
+static const struct builtin B9_vec_all_eq = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:9", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(53) };
+static const struct builtin B10_vec_all_eq = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:10", "*vcmpeqfp.", CODE_FOR_j_24_t_fxx_simple, B_UID(54) };
+static const struct builtin B11_vec_all_eq = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:11", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(55) };
+static const struct builtin B12_vec_all_eq = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:12", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(56) };
+static const struct builtin B13_vec_all_eq = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:13", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(57) };
+static const struct builtin B14_vec_all_eq = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:14", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(58) };
+static const struct builtin B15_vec_all_eq = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:15", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(59) };
+static const struct builtin B16_vec_all_eq = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:16", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(60) };
+static const struct builtin B17_vec_all_eq = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:17", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(61) };
+static const struct builtin B18_vec_all_eq = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:18", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(62) };
+static const struct builtin B19_vec_all_eq = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:19", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(63) };
+static const struct builtin B20_vec_all_eq = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:20", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(64) };
+static const struct builtin B21_vec_all_eq = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:21", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(65) };
+static const struct builtin B22_vec_all_eq = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:22", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(66) };
+static const struct builtin B23_vec_all_eq = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:23", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(67) };
+static const struct builtin B1_vec_all_ge = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:1", "*vcmpgtsh.", CODE_FOR_j_26_t_frxx_simple, B_UID(68) };
+static const struct builtin B2_vec_all_ge = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:2", "*vcmpgtuh.", CODE_FOR_j_26_t_frxx_simple, B_UID(69) };
+static const struct builtin B3_vec_all_ge = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:3", "*vcmpgtsw.", CODE_FOR_j_26_t_frxx_simple, B_UID(70) };
+static const struct builtin B4_vec_all_ge = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:4", "*vcmpgtuw.", CODE_FOR_j_26_t_frxx_simple, B_UID(71) };
+static const struct builtin B5_vec_all_ge = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:5", "*vcmpgtsb.", CODE_FOR_j_26_t_frxx_simple, B_UID(72) };
+static const struct builtin B6_vec_all_ge = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:6", "*vcmpgtub.", CODE_FOR_j_26_t_frxx_simple, B_UID(73) };
+static const struct builtin B7_vec_all_ge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_ge:7", "*vcmpgefp.", CODE_FOR_j_24_t_fxx_simple, B_UID(74) };
+static const struct builtin B8_vec_all_ge = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:8", "*vcmpgtsh.", CODE_FOR_j_26_t_frxx_simple, B_UID(75) };
+static const struct builtin B9_vec_all_ge = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:9", "*vcmpgtsh.", CODE_FOR_j_26_t_frxx_simple, B_UID(76) };
+static const struct builtin B10_vec_all_ge = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:10", "*vcmpgtsw.", CODE_FOR_j_26_t_frxx_simple, B_UID(77) };
+static const struct builtin B11_vec_all_ge = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:11", "*vcmpgtsw.", CODE_FOR_j_26_t_frxx_simple, B_UID(78) };
+static const struct builtin B12_vec_all_ge = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:12", "*vcmpgtsb.", CODE_FOR_j_26_t_frxx_simple, B_UID(79) };
+static const struct builtin B13_vec_all_ge = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:13", "*vcmpgtsb.", CODE_FOR_j_26_t_frxx_simple, B_UID(80) };
+static const struct builtin B14_vec_all_ge = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:14", "*vcmpgtuh.", CODE_FOR_j_26_t_frxx_simple, B_UID(81) };
+static const struct builtin B15_vec_all_ge = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:15", "*vcmpgtuh.", CODE_FOR_j_26_t_frxx_simple, B_UID(82) };
+static const struct builtin B16_vec_all_ge = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:16", "*vcmpgtuw.", CODE_FOR_j_26_t_frxx_simple, B_UID(83) };
+static const struct builtin B17_vec_all_ge = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:17", "*vcmpgtuw.", CODE_FOR_j_26_t_frxx_simple, B_UID(84) };
+static const struct builtin B18_vec_all_ge = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:18", "*vcmpgtub.", CODE_FOR_j_26_t_frxx_simple, B_UID(85) };
+static const struct builtin B19_vec_all_ge = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:19", "*vcmpgtub.", CODE_FOR_j_26_t_frxx_simple, B_UID(86) };
+static const struct builtin B1_vec_all_gt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:1", "*vcmpgtsh.", CODE_FOR_j_24_t_fxx_simple, B_UID(87) };
+static const struct builtin B2_vec_all_gt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:2", "*vcmpgtuh.", CODE_FOR_j_24_t_fxx_simple, B_UID(88) };
+static const struct builtin B3_vec_all_gt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:3", "*vcmpgtsw.", CODE_FOR_j_24_t_fxx_simple, B_UID(89) };
+static const struct builtin B4_vec_all_gt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:4", "*vcmpgtuw.", CODE_FOR_j_24_t_fxx_simple, B_UID(90) };
+static const struct builtin B5_vec_all_gt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:5", "*vcmpgtsb.", CODE_FOR_j_24_t_fxx_simple, B_UID(91) };
+static const struct builtin B6_vec_all_gt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:6", "*vcmpgtub.", CODE_FOR_j_24_t_fxx_simple, B_UID(92) };
+static const struct builtin B7_vec_all_gt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:7", "*vcmpgtfp.", CODE_FOR_j_24_t_fxx_simple, B_UID(93) };
+static const struct builtin B8_vec_all_gt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:8", "*vcmpgtsh.", CODE_FOR_j_24_t_fxx_simple, B_UID(94) };
+static const struct builtin B9_vec_all_gt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:9", "*vcmpgtsh.", CODE_FOR_j_24_t_fxx_simple, B_UID(95) };
+static const struct builtin B10_vec_all_gt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:10", "*vcmpgtsw.", CODE_FOR_j_24_t_fxx_simple, B_UID(96) };
+static const struct builtin B11_vec_all_gt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:11", "*vcmpgtsw.", CODE_FOR_j_24_t_fxx_simple, B_UID(97) };
+static const struct builtin B12_vec_all_gt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:12", "*vcmpgtsb.", CODE_FOR_j_24_t_fxx_simple, B_UID(98) };
+static const struct builtin B13_vec_all_gt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:13", "*vcmpgtsb.", CODE_FOR_j_24_t_fxx_simple, B_UID(99) };
+static const struct builtin B14_vec_all_gt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:14", "*vcmpgtuh.", CODE_FOR_j_24_t_fxx_simple, B_UID(100) };
+static const struct builtin B15_vec_all_gt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:15", "*vcmpgtuh.", CODE_FOR_j_24_t_fxx_simple, B_UID(101) };
+static const struct builtin B16_vec_all_gt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:16", "*vcmpgtuw.", CODE_FOR_j_24_t_fxx_simple, B_UID(102) };
+static const struct builtin B17_vec_all_gt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:17", "*vcmpgtuw.", CODE_FOR_j_24_t_fxx_simple, B_UID(103) };
+static const struct builtin B18_vec_all_gt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:18", "*vcmpgtub.", CODE_FOR_j_24_t_fxx_simple, B_UID(104) };
+static const struct builtin B19_vec_all_gt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:19", "*vcmpgtub.", CODE_FOR_j_24_t_fxx_simple, B_UID(105) };
+static const struct builtin B_vec_all_in = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_in", "*vcmpbfp.", CODE_FOR_j_26_t_fxx_simple, B_UID(106) };
+static const struct builtin B1_vec_all_le = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:1", "*vcmpgtsh.", CODE_FOR_j_26_t_fxx_simple, B_UID(107) };
+static const struct builtin B2_vec_all_le = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:2", "*vcmpgtuh.", CODE_FOR_j_26_t_fxx_simple, B_UID(108) };
+static const struct builtin B3_vec_all_le = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:3", "*vcmpgtsw.", CODE_FOR_j_26_t_fxx_simple, B_UID(109) };
+static const struct builtin B4_vec_all_le = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:4", "*vcmpgtuw.", CODE_FOR_j_26_t_fxx_simple, B_UID(110) };
+static const struct builtin B5_vec_all_le = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:5", "*vcmpgtsb.", CODE_FOR_j_26_t_fxx_simple, B_UID(111) };
+static const struct builtin B6_vec_all_le = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:6", "*vcmpgtub.", CODE_FOR_j_26_t_fxx_simple, B_UID(112) };
+static const struct builtin B7_vec_all_le = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_le:7", "*vcmpgefp.", CODE_FOR_j_24_t_frxx_simple, B_UID(113) };
+static const struct builtin B8_vec_all_le = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:8", "*vcmpgtsh.", CODE_FOR_j_26_t_fxx_simple, B_UID(114) };
+static const struct builtin B9_vec_all_le = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:9", "*vcmpgtsh.", CODE_FOR_j_26_t_fxx_simple, B_UID(115) };
+static const struct builtin B10_vec_all_le = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:10", "*vcmpgtsw.", CODE_FOR_j_26_t_fxx_simple, B_UID(116) };
+static const struct builtin B11_vec_all_le = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:11", "*vcmpgtsw.", CODE_FOR_j_26_t_fxx_simple, B_UID(117) };
+static const struct builtin B12_vec_all_le = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:12", "*vcmpgtsb.", CODE_FOR_j_26_t_fxx_simple, B_UID(118) };
+static const struct builtin B13_vec_all_le = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:13", "*vcmpgtsb.", CODE_FOR_j_26_t_fxx_simple, B_UID(119) };
+static const struct builtin B14_vec_all_le = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:14", "*vcmpgtuh.", CODE_FOR_j_26_t_fxx_simple, B_UID(120) };
+static const struct builtin B15_vec_all_le = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:15", "*vcmpgtuh.", CODE_FOR_j_26_t_fxx_simple, B_UID(121) };
+static const struct builtin B16_vec_all_le = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:16", "*vcmpgtuw.", CODE_FOR_j_26_t_fxx_simple, B_UID(122) };
+static const struct builtin B17_vec_all_le = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:17", "*vcmpgtuw.", CODE_FOR_j_26_t_fxx_simple, B_UID(123) };
+static const struct builtin B18_vec_all_le = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:18", "*vcmpgtub.", CODE_FOR_j_26_t_fxx_simple, B_UID(124) };
+static const struct builtin B19_vec_all_le = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:19", "*vcmpgtub.", CODE_FOR_j_26_t_fxx_simple, B_UID(125) };
+static const struct builtin B1_vec_all_lt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:1", "*vcmpgtsh.", CODE_FOR_j_24_t_frxx_simple, B_UID(126) };
+static const struct builtin B2_vec_all_lt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:2", "*vcmpgtuh.", CODE_FOR_j_24_t_frxx_simple, B_UID(127) };
+static const struct builtin B3_vec_all_lt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:3", "*vcmpgtsw.", CODE_FOR_j_24_t_frxx_simple, B_UID(128) };
+static const struct builtin B4_vec_all_lt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:4", "*vcmpgtuw.", CODE_FOR_j_24_t_frxx_simple, B_UID(129) };
+static const struct builtin B5_vec_all_lt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:5", "*vcmpgtsb.", CODE_FOR_j_24_t_frxx_simple, B_UID(130) };
+static const struct builtin B6_vec_all_lt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:6", "*vcmpgtub.", CODE_FOR_j_24_t_frxx_simple, B_UID(131) };
+static const struct builtin B7_vec_all_lt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:7", "*vcmpgtfp.", CODE_FOR_j_24_t_frxx_simple, B_UID(132) };
+static const struct builtin B8_vec_all_lt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:8", "*vcmpgtsh.", CODE_FOR_j_24_t_frxx_simple, B_UID(133) };
+static const struct builtin B9_vec_all_lt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:9", "*vcmpgtsh.", CODE_FOR_j_24_t_frxx_simple, B_UID(134) };
+static const struct builtin B10_vec_all_lt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:10", "*vcmpgtsw.", CODE_FOR_j_24_t_frxx_simple, B_UID(135) };
+static const struct builtin B11_vec_all_lt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:11", "*vcmpgtsw.", CODE_FOR_j_24_t_frxx_simple, B_UID(136) };
+static const struct builtin B12_vec_all_lt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:12", "*vcmpgtsb.", CODE_FOR_j_24_t_frxx_simple, B_UID(137) };
+static const struct builtin B13_vec_all_lt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:13", "*vcmpgtsb.", CODE_FOR_j_24_t_frxx_simple, B_UID(138) };
+static const struct builtin B14_vec_all_lt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:14", "*vcmpgtuh.", CODE_FOR_j_24_t_frxx_simple, B_UID(139) };
+static const struct builtin B15_vec_all_lt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:15", "*vcmpgtuh.", CODE_FOR_j_24_t_frxx_simple, B_UID(140) };
+static const struct builtin B16_vec_all_lt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:16", "*vcmpgtuw.", CODE_FOR_j_24_t_frxx_simple, B_UID(141) };
+static const struct builtin B17_vec_all_lt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:17", "*vcmpgtuw.", CODE_FOR_j_24_t_frxx_simple, B_UID(142) };
+static const struct builtin B18_vec_all_lt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:18", "*vcmpgtub.", CODE_FOR_j_24_t_frxx_simple, B_UID(143) };
+static const struct builtin B19_vec_all_lt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:19", "*vcmpgtub.", CODE_FOR_j_24_t_frxx_simple, B_UID(144) };
+static const struct builtin B_vec_all_nan = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc26td, 1, FALSE, FALSE, 0, "vec_all_nan", "*vcmpeqfp.", CODE_FOR_j_26_t_fx_simple, B_UID(145) };
+static const struct builtin B1_vec_all_ne = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:1", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(146) };
+static const struct builtin B2_vec_all_ne = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:2", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(147) };
+static const struct builtin B3_vec_all_ne = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:3", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(148) };
+static const struct builtin B4_vec_all_ne = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:4", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(149) };
+static const struct builtin B5_vec_all_ne = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:5", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(150) };
+static const struct builtin B6_vec_all_ne = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:6", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(151) };
+static const struct builtin B7_vec_all_ne = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:7", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(152) };
+static const struct builtin B8_vec_all_ne = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:8", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(153) };
+static const struct builtin B9_vec_all_ne = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:9", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(154) };
+static const struct builtin B10_vec_all_ne = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:10", "*vcmpeqfp.", CODE_FOR_j_26_t_fxx_simple, B_UID(155) };
+static const struct builtin B11_vec_all_ne = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:11", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(156) };
+static const struct builtin B12_vec_all_ne = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:12", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(157) };
+static const struct builtin B13_vec_all_ne = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:13", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(158) };
+static const struct builtin B14_vec_all_ne = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:14", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(159) };
+static const struct builtin B15_vec_all_ne = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:15", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(160) };
+static const struct builtin B16_vec_all_ne = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:16", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(161) };
+static const struct builtin B17_vec_all_ne = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:17", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(162) };
+static const struct builtin B18_vec_all_ne = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:18", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(163) };
+static const struct builtin B19_vec_all_ne = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:19", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(164) };
+static const struct builtin B20_vec_all_ne = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:20", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(165) };
+static const struct builtin B21_vec_all_ne = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:21", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(166) };
+static const struct builtin B22_vec_all_ne = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:22", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(167) };
+static const struct builtin B23_vec_all_ne = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:23", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(168) };
+static const struct builtin B_vec_all_nge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_nge", "*vcmpgefp.", CODE_FOR_j_26_t_fxx_simple, B_UID(169) };
+static const struct builtin B_vec_all_ngt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ngt", "*vcmpgtfp.", CODE_FOR_j_26_t_fxx_simple, B_UID(170) };
+static const struct builtin B_vec_all_nle = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_nle", "*vcmpgefp.", CODE_FOR_j_26_t_frxx_simple, B_UID(171) };
+static const struct builtin B_vec_all_nlt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_nlt", "*vcmpgtfp.", CODE_FOR_j_26_t_frxx_simple, B_UID(172) };
+static const struct builtin B_vec_all_numeric = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc24td, 1, FALSE, FALSE, 0, "vec_all_numeric", "*vcmpeqfp.", CODE_FOR_j_24_t_fx_simple, B_UID(173) };
+static const struct builtin B1_vec_vand = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 2, "vec_vand:1", "*vand", CODE_FOR_xfxx_simple, B_UID(174) };
+static const struct builtin B2_vec_vand = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vand:2", "*vand", CODE_FOR_xfxx_simple, B_UID(175) };
+static const struct builtin B3_vec_vand = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vand:3", "*vand", CODE_FOR_xfxx_simple, B_UID(176) };
+static const struct builtin B4_vec_vand = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 2, "vec_vand:4", "*vand", CODE_FOR_xfxx_simple, B_UID(177) };
+static const struct builtin B5_vec_vand = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vand:5", "*vand", CODE_FOR_xfxx_simple, B_UID(178) };
+static const struct builtin B6_vec_vand = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vand:6", "*vand", CODE_FOR_xfxx_simple, B_UID(179) };
+static const struct builtin B7_vec_vand = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vand:7", "*vand", CODE_FOR_xfxx_simple, B_UID(180) };
+static const struct builtin B8_vec_vand = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 2, "vec_vand:8", "*vand", CODE_FOR_xfxx_simple, B_UID(181) };
+static const struct builtin B9_vec_vand = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vand:9", "*vand", CODE_FOR_xfxx_simple, B_UID(182) };
+static const struct builtin B10_vec_vand = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vand:10", "*vand", CODE_FOR_xfxx_simple, B_UID(183) };
+static const struct builtin B11_vec_vand = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vand:11", "*vand", CODE_FOR_xfxx_simple, B_UID(184) };
+static const struct builtin B12_vec_vand = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vand:12", "*vand", CODE_FOR_xfxx_simple, B_UID(185) };
+static const struct builtin B13_vec_vand = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vand:13", "*vand", CODE_FOR_xfxx_simple, B_UID(186) };
+static const struct builtin B14_vec_vand = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vand:14", "*vand", CODE_FOR_xfxx_simple, B_UID(187) };
+static const struct builtin B15_vec_vand = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vand:15", "*vand", CODE_FOR_xfxx_simple, B_UID(188) };
+static const struct builtin B16_vec_vand = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vand:16", "*vand", CODE_FOR_xfxx_simple, B_UID(189) };
+static const struct builtin B17_vec_vand = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vand:17", "*vand", CODE_FOR_xfxx_simple, B_UID(190) };
+static const struct builtin B18_vec_vand = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vand:18", "*vand", CODE_FOR_xfxx_simple, B_UID(191) };
+static const struct builtin B19_vec_vand = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vand:19", "*vand", CODE_FOR_xfxx_simple, B_UID(192) };
+static const struct builtin B20_vec_vand = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vand:20", "*vand", CODE_FOR_xfxx_simple, B_UID(193) };
+static const struct builtin B21_vec_vand = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vand:21", "*vand", CODE_FOR_xfxx_simple, B_UID(194) };
+static const struct builtin B22_vec_vand = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vand:22", "*vand", CODE_FOR_xfxx_simple, B_UID(195) };
+static const struct builtin B23_vec_vand = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vand:23", "*vand", CODE_FOR_xfxx_simple, B_UID(196) };
+static const struct builtin B24_vec_vand = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vand:24", "*vand", CODE_FOR_xfxx_simple, B_UID(197) };
+static const struct builtin B1_vec_vandc = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 1, "vec_vandc:1", "*vandc", CODE_FOR_xfxx_simple, B_UID(198) };
+static const struct builtin B2_vec_vandc = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vandc:2", "*vandc", CODE_FOR_xfxx_simple, B_UID(199) };
+static const struct builtin B3_vec_vandc = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vandc:3", "*vandc", CODE_FOR_xfxx_simple, B_UID(200) };
+static const struct builtin B4_vec_vandc = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 1, "vec_vandc:4", "*vandc", CODE_FOR_xfxx_simple, B_UID(201) };
+static const struct builtin B5_vec_vandc = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vandc:5", "*vandc", CODE_FOR_xfxx_simple, B_UID(202) };
+static const struct builtin B6_vec_vandc = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vandc:6", "*vandc", CODE_FOR_xfxx_simple, B_UID(203) };
+static const struct builtin B7_vec_vandc = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vandc:7", "*vandc", CODE_FOR_xfxx_simple, B_UID(204) };
+static const struct builtin B8_vec_vandc = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 1, "vec_vandc:8", "*vandc", CODE_FOR_xfxx_simple, B_UID(205) };
+static const struct builtin B9_vec_vandc = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vandc:9", "*vandc", CODE_FOR_xfxx_simple, B_UID(206) };
+static const struct builtin B10_vec_vandc = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vandc:10", "*vandc", CODE_FOR_xfxx_simple, B_UID(207) };
+static const struct builtin B11_vec_vandc = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vandc:11", "*vandc", CODE_FOR_xfxx_simple, B_UID(208) };
+static const struct builtin B12_vec_vandc = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vandc:12", "*vandc", CODE_FOR_xfxx_simple, B_UID(209) };
+static const struct builtin B13_vec_vandc = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vandc:13", "*vandc", CODE_FOR_xfxx_simple, B_UID(210) };
+static const struct builtin B14_vec_vandc = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vandc:14", "*vandc", CODE_FOR_xfxx_simple, B_UID(211) };
+static const struct builtin B15_vec_vandc = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vandc:15", "*vandc", CODE_FOR_xfxx_simple, B_UID(212) };
+static const struct builtin B16_vec_vandc = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vandc:16", "*vandc", CODE_FOR_xfxx_simple, B_UID(213) };
+static const struct builtin B17_vec_vandc = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vandc:17", "*vandc", CODE_FOR_xfxx_simple, B_UID(214) };
+static const struct builtin B18_vec_vandc = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vandc:18", "*vandc", CODE_FOR_xfxx_simple, B_UID(215) };
+static const struct builtin B19_vec_vandc = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vandc:19", "*vandc", CODE_FOR_xfxx_simple, B_UID(216) };
+static const struct builtin B20_vec_vandc = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vandc:20", "*vandc", CODE_FOR_xfxx_simple, B_UID(217) };
+static const struct builtin B21_vec_vandc = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vandc:21", "*vandc", CODE_FOR_xfxx_simple, B_UID(218) };
+static const struct builtin B22_vec_vandc = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vandc:22", "*vandc", CODE_FOR_xfxx_simple, B_UID(219) };
+static const struct builtin B23_vec_vandc = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vandc:23", "*vandc", CODE_FOR_xfxx_simple, B_UID(220) };
+static const struct builtin B24_vec_vandc = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vandc:24", "*vandc", CODE_FOR_xfxx_simple, B_UID(221) };
+static const struct builtin B1_vec_any_eq = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:1", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(222) };
+static const struct builtin B2_vec_any_eq = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:2", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(223) };
+static const struct builtin B3_vec_any_eq = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:3", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(224) };
+static const struct builtin B4_vec_any_eq = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:4", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(225) };
+static const struct builtin B5_vec_any_eq = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:5", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(226) };
+static const struct builtin B6_vec_any_eq = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:6", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(227) };
+static const struct builtin B7_vec_any_eq = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:7", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(228) };
+static const struct builtin B8_vec_any_eq = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:8", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(229) };
+static const struct builtin B9_vec_any_eq = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:9", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(230) };
+static const struct builtin B10_vec_any_eq = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:10", "*vcmpeqfp.", CODE_FOR_j_26_f_fxx_simple, B_UID(231) };
+static const struct builtin B11_vec_any_eq = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:11", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(232) };
+static const struct builtin B12_vec_any_eq = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:12", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(233) };
+static const struct builtin B13_vec_any_eq = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:13", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(234) };
+static const struct builtin B14_vec_any_eq = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:14", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(235) };
+static const struct builtin B15_vec_any_eq = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:15", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(236) };
+static const struct builtin B16_vec_any_eq = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:16", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(237) };
+static const struct builtin B17_vec_any_eq = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:17", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(238) };
+static const struct builtin B18_vec_any_eq = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:18", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(239) };
+static const struct builtin B19_vec_any_eq = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:19", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(240) };
+static const struct builtin B20_vec_any_eq = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:20", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(241) };
+static const struct builtin B21_vec_any_eq = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:21", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(242) };
+static const struct builtin B22_vec_any_eq = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:22", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(243) };
+static const struct builtin B23_vec_any_eq = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:23", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(244) };
+static const struct builtin B1_vec_any_ge = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:1", "*vcmpgtsh.", CODE_FOR_j_24_f_frxx_simple, B_UID(245) };
+static const struct builtin B2_vec_any_ge = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:2", "*vcmpgtuh.", CODE_FOR_j_24_f_frxx_simple, B_UID(246) };
+static const struct builtin B3_vec_any_ge = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:3", "*vcmpgtsw.", CODE_FOR_j_24_f_frxx_simple, B_UID(247) };
+static const struct builtin B4_vec_any_ge = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:4", "*vcmpgtuw.", CODE_FOR_j_24_f_frxx_simple, B_UID(248) };
+static const struct builtin B5_vec_any_ge = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:5", "*vcmpgtsb.", CODE_FOR_j_24_f_frxx_simple, B_UID(249) };
+static const struct builtin B6_vec_any_ge = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:6", "*vcmpgtub.", CODE_FOR_j_24_f_frxx_simple, B_UID(250) };
+static const struct builtin B7_vec_any_ge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_ge:7", "*vcmpgefp.", CODE_FOR_j_26_f_fxx_simple, B_UID(251) };
+static const struct builtin B8_vec_any_ge = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:8", "*vcmpgtsh.", CODE_FOR_j_24_f_frxx_simple, B_UID(252) };
+static const struct builtin B9_vec_any_ge = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:9", "*vcmpgtsh.", CODE_FOR_j_24_f_frxx_simple, B_UID(253) };
+static const struct builtin B10_vec_any_ge = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:10", "*vcmpgtsw.", CODE_FOR_j_24_f_frxx_simple, B_UID(254) };
+static const struct builtin B11_vec_any_ge = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:11", "*vcmpgtsw.", CODE_FOR_j_24_f_frxx_simple, B_UID(255) };
+static const struct builtin B12_vec_any_ge = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:12", "*vcmpgtsb.", CODE_FOR_j_24_f_frxx_simple, B_UID(256) };
+static const struct builtin B13_vec_any_ge = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:13", "*vcmpgtsb.", CODE_FOR_j_24_f_frxx_simple, B_UID(257) };
+static const struct builtin B14_vec_any_ge = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:14", "*vcmpgtuh.", CODE_FOR_j_24_f_frxx_simple, B_UID(258) };
+static const struct builtin B15_vec_any_ge = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:15", "*vcmpgtuh.", CODE_FOR_j_24_f_frxx_simple, B_UID(259) };
+static const struct builtin B16_vec_any_ge = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:16", "*vcmpgtuw.", CODE_FOR_j_24_f_frxx_simple, B_UID(260) };
+static const struct builtin B17_vec_any_ge = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:17", "*vcmpgtuw.", CODE_FOR_j_24_f_frxx_simple, B_UID(261) };
+static const struct builtin B18_vec_any_ge = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:18", "*vcmpgtub.", CODE_FOR_j_24_f_frxx_simple, B_UID(262) };
+static const struct builtin B19_vec_any_ge = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:19", "*vcmpgtub.", CODE_FOR_j_24_f_frxx_simple, B_UID(263) };
+static const struct builtin B1_vec_any_gt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:1", "*vcmpgtsh.", CODE_FOR_j_26_f_fxx_simple, B_UID(264) };
+static const struct builtin B2_vec_any_gt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:2", "*vcmpgtuh.", CODE_FOR_j_26_f_fxx_simple, B_UID(265) };
+static const struct builtin B3_vec_any_gt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:3", "*vcmpgtsw.", CODE_FOR_j_26_f_fxx_simple, B_UID(266) };
+static const struct builtin B4_vec_any_gt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:4", "*vcmpgtuw.", CODE_FOR_j_26_f_fxx_simple, B_UID(267) };
+static const struct builtin B5_vec_any_gt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:5", "*vcmpgtsb.", CODE_FOR_j_26_f_fxx_simple, B_UID(268) };
+static const struct builtin B6_vec_any_gt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:6", "*vcmpgtub.", CODE_FOR_j_26_f_fxx_simple, B_UID(269) };
+static const struct builtin B7_vec_any_gt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:7", "*vcmpgtfp.", CODE_FOR_j_26_f_fxx_simple, B_UID(270) };
+static const struct builtin B8_vec_any_gt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:8", "*vcmpgtsh.", CODE_FOR_j_26_f_fxx_simple, B_UID(271) };
+static const struct builtin B9_vec_any_gt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:9", "*vcmpgtsh.", CODE_FOR_j_26_f_fxx_simple, B_UID(272) };
+static const struct builtin B10_vec_any_gt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:10", "*vcmpgtsw.", CODE_FOR_j_26_f_fxx_simple, B_UID(273) };
+static const struct builtin B11_vec_any_gt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:11", "*vcmpgtsw.", CODE_FOR_j_26_f_fxx_simple, B_UID(274) };
+static const struct builtin B12_vec_any_gt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:12", "*vcmpgtsb.", CODE_FOR_j_26_f_fxx_simple, B_UID(275) };
+static const struct builtin B13_vec_any_gt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:13", "*vcmpgtsb.", CODE_FOR_j_26_f_fxx_simple, B_UID(276) };
+static const struct builtin B14_vec_any_gt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:14", "*vcmpgtuh.", CODE_FOR_j_26_f_fxx_simple, B_UID(277) };
+static const struct builtin B15_vec_any_gt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:15", "*vcmpgtuh.", CODE_FOR_j_26_f_fxx_simple, B_UID(278) };
+static const struct builtin B16_vec_any_gt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:16", "*vcmpgtuw.", CODE_FOR_j_26_f_fxx_simple, B_UID(279) };
+static const struct builtin B17_vec_any_gt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:17", "*vcmpgtuw.", CODE_FOR_j_26_f_fxx_simple, B_UID(280) };
+static const struct builtin B18_vec_any_gt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:18", "*vcmpgtub.", CODE_FOR_j_26_f_fxx_simple, B_UID(281) };
+static const struct builtin B19_vec_any_gt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:19", "*vcmpgtub.", CODE_FOR_j_26_f_fxx_simple, B_UID(282) };
+static const struct builtin B1_vec_any_le = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:1", "*vcmpgtsh.", CODE_FOR_j_24_f_fxx_simple, B_UID(283) };
+static const struct builtin B2_vec_any_le = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:2", "*vcmpgtuh.", CODE_FOR_j_24_f_fxx_simple, B_UID(284) };
+static const struct builtin B3_vec_any_le = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:3", "*vcmpgtsw.", CODE_FOR_j_24_f_fxx_simple, B_UID(285) };
+static const struct builtin B4_vec_any_le = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:4", "*vcmpgtuw.", CODE_FOR_j_24_f_fxx_simple, B_UID(286) };
+static const struct builtin B5_vec_any_le = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:5", "*vcmpgtsb.", CODE_FOR_j_24_f_fxx_simple, B_UID(287) };
+static const struct builtin B6_vec_any_le = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:6", "*vcmpgtub.", CODE_FOR_j_24_f_fxx_simple, B_UID(288) };
+static const struct builtin B7_vec_any_le = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_le:7", "*vcmpgefp.", CODE_FOR_j_26_f_frxx_simple, B_UID(289) };
+static const struct builtin B8_vec_any_le = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:8", "*vcmpgtsh.", CODE_FOR_j_24_f_fxx_simple, B_UID(290) };
+static const struct builtin B9_vec_any_le = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:9", "*vcmpgtsh.", CODE_FOR_j_24_f_fxx_simple, B_UID(291) };
+static const struct builtin B10_vec_any_le = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:10", "*vcmpgtsw.", CODE_FOR_j_24_f_fxx_simple, B_UID(292) };
+static const struct builtin B11_vec_any_le = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:11", "*vcmpgtsw.", CODE_FOR_j_24_f_fxx_simple, B_UID(293) };
+static const struct builtin B12_vec_any_le = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:12", "*vcmpgtsb.", CODE_FOR_j_24_f_fxx_simple, B_UID(294) };
+static const struct builtin B13_vec_any_le = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:13", "*vcmpgtsb.", CODE_FOR_j_24_f_fxx_simple, B_UID(295) };
+static const struct builtin B14_vec_any_le = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:14", "*vcmpgtuh.", CODE_FOR_j_24_f_fxx_simple, B_UID(296) };
+static const struct builtin B15_vec_any_le = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:15", "*vcmpgtuh.", CODE_FOR_j_24_f_fxx_simple, B_UID(297) };
+static const struct builtin B16_vec_any_le = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:16", "*vcmpgtuw.", CODE_FOR_j_24_f_fxx_simple, B_UID(298) };
+static const struct builtin B17_vec_any_le = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:17", "*vcmpgtuw.", CODE_FOR_j_24_f_fxx_simple, B_UID(299) };
+static const struct builtin B18_vec_any_le = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:18", "*vcmpgtub.", CODE_FOR_j_24_f_fxx_simple, B_UID(300) };
+static const struct builtin B19_vec_any_le = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:19", "*vcmpgtub.", CODE_FOR_j_24_f_fxx_simple, B_UID(301) };
+static const struct builtin B1_vec_any_lt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:1", "*vcmpgtsh.", CODE_FOR_j_26_f_frxx_simple, B_UID(302) };
+static const struct builtin B2_vec_any_lt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:2", "*vcmpgtuh.", CODE_FOR_j_26_f_frxx_simple, B_UID(303) };
+static const struct builtin B3_vec_any_lt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:3", "*vcmpgtsw.", CODE_FOR_j_26_f_frxx_simple, B_UID(304) };
+static const struct builtin B4_vec_any_lt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:4", "*vcmpgtuw.", CODE_FOR_j_26_f_frxx_simple, B_UID(305) };
+static const struct builtin B5_vec_any_lt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:5", "*vcmpgtsb.", CODE_FOR_j_26_f_frxx_simple, B_UID(306) };
+static const struct builtin B6_vec_any_lt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:6", "*vcmpgtub.", CODE_FOR_j_26_f_frxx_simple, B_UID(307) };
+static const struct builtin B7_vec_any_lt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:7", "*vcmpgtfp.", CODE_FOR_j_26_f_frxx_simple, B_UID(308) };
+static const struct builtin B8_vec_any_lt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:8", "*vcmpgtsh.", CODE_FOR_j_26_f_frxx_simple, B_UID(309) };
+static const struct builtin B9_vec_any_lt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:9", "*vcmpgtsh.", CODE_FOR_j_26_f_frxx_simple, B_UID(310) };
+static const struct builtin B10_vec_any_lt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:10", "*vcmpgtsw.", CODE_FOR_j_26_f_frxx_simple, B_UID(311) };
+static const struct builtin B11_vec_any_lt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:11", "*vcmpgtsw.", CODE_FOR_j_26_f_frxx_simple, B_UID(312) };
+static const struct builtin B12_vec_any_lt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:12", "*vcmpgtsb.", CODE_FOR_j_26_f_frxx_simple, B_UID(313) };
+static const struct builtin B13_vec_any_lt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:13", "*vcmpgtsb.", CODE_FOR_j_26_f_frxx_simple, B_UID(314) };
+static const struct builtin B14_vec_any_lt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:14", "*vcmpgtuh.", CODE_FOR_j_26_f_frxx_simple, B_UID(315) };
+static const struct builtin B15_vec_any_lt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:15", "*vcmpgtuh.", CODE_FOR_j_26_f_frxx_simple, B_UID(316) };
+static const struct builtin B16_vec_any_lt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:16", "*vcmpgtuw.", CODE_FOR_j_26_f_frxx_simple, B_UID(317) };
+static const struct builtin B17_vec_any_lt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:17", "*vcmpgtuw.", CODE_FOR_j_26_f_frxx_simple, B_UID(318) };
+static const struct builtin B18_vec_any_lt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:18", "*vcmpgtub.", CODE_FOR_j_26_f_frxx_simple, B_UID(319) };
+static const struct builtin B19_vec_any_lt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:19", "*vcmpgtub.", CODE_FOR_j_26_f_frxx_simple, B_UID(320) };
+static const struct builtin B_vec_any_nan = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc24fd, 1, FALSE, FALSE, 0, "vec_any_nan", "*vcmpeqfp.", CODE_FOR_j_24_f_fx_simple, B_UID(321) };
+static const struct builtin B1_vec_any_ne = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:1", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(322) };
+static const struct builtin B2_vec_any_ne = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:2", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(323) };
+static const struct builtin B3_vec_any_ne = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:3", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(324) };
+static const struct builtin B4_vec_any_ne = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:4", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(325) };
+static const struct builtin B5_vec_any_ne = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:5", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(326) };
+static const struct builtin B6_vec_any_ne = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:6", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(327) };
+static const struct builtin B7_vec_any_ne = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:7", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(328) };
+static const struct builtin B8_vec_any_ne = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:8", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(329) };
+static const struct builtin B9_vec_any_ne = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:9", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(330) };
+static const struct builtin B10_vec_any_ne = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:10", "*vcmpeqfp.", CODE_FOR_j_24_f_fxx_simple, B_UID(331) };
+static const struct builtin B11_vec_any_ne = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:11", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(332) };
+static const struct builtin B12_vec_any_ne = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:12", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(333) };
+static const struct builtin B13_vec_any_ne = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:13", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(334) };
+static const struct builtin B14_vec_any_ne = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:14", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(335) };
+static const struct builtin B15_vec_any_ne = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:15", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(336) };
+static const struct builtin B16_vec_any_ne = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:16", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(337) };
+static const struct builtin B17_vec_any_ne = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:17", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(338) };
+static const struct builtin B18_vec_any_ne = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:18", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(339) };
+static const struct builtin B19_vec_any_ne = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:19", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(340) };
+static const struct builtin B20_vec_any_ne = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:20", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(341) };
+static const struct builtin B21_vec_any_ne = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:21", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(342) };
+static const struct builtin B22_vec_any_ne = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:22", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(343) };
+static const struct builtin B23_vec_any_ne = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:23", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(344) };
+static const struct builtin B_vec_any_nge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_nge", "*vcmpgefp.", CODE_FOR_j_24_f_fxx_simple, B_UID(345) };
+static const struct builtin B_vec_any_ngt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ngt", "*vcmpgtfp.", CODE_FOR_j_24_f_fxx_simple, B_UID(346) };
+static const struct builtin B_vec_any_nle = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_nle", "*vcmpgefp.", CODE_FOR_j_24_f_frxx_simple, B_UID(347) };
+static const struct builtin B_vec_any_nlt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_nlt", "*vcmpgtfp.", CODE_FOR_j_24_f_frxx_simple, B_UID(348) };
+static const struct builtin B_vec_any_numeric = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc26fd, 1, FALSE, FALSE, 0, "vec_any_numeric", "*vcmpeqfp.", CODE_FOR_j_26_f_fx_simple, B_UID(349) };
+static const struct builtin B_vec_any_out = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_out", "*vcmpbfp.", CODE_FOR_j_26_f_fxx_simple, B_UID(350) };
+static const struct builtin B_vec_vavgsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vavgsh", "*vavgsh", CODE_FOR_xfxx_simple, B_UID(351) };
+static const struct builtin B_vec_vavgsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vavgsw", "*vavgsw", CODE_FOR_xfxx_simple, B_UID(352) };
+static const struct builtin B_vec_vavgsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vavgsb", "*vavgsb", CODE_FOR_xfxx_simple, B_UID(353) };
+static const struct builtin B_vec_vavguh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vavguh", "*vavguh", CODE_FOR_xfxx_simple, B_UID(354) };
+static const struct builtin B_vec_vavguw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vavguw", "*vavguw", CODE_FOR_xfxx_simple, B_UID(355) };
+static const struct builtin B_vec_vavgub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vavgub", "*vavgub", CODE_FOR_xfxx_simple, B_UID(356) };
+static const struct builtin B_vec_vrfip = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfip", "*vrfip", CODE_FOR_xfx_fp, B_UID(357) };
+static const struct builtin B_vec_vcmpbfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vcmpbfp", "*vcmpbfp", CODE_FOR_xfxx_simple, B_UID(358) };
+static const struct builtin B_vec_vcmpeqfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 7, "vec_vcmpeqfp", "*vcmpeqfp", CODE_FOR_xfxx_simple, B_UID(359) };
+static const struct builtin B1_vec_vcmpequh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 7, "vec_vcmpequh:1", "*vcmpequh", CODE_FOR_xfxx_simple, B_UID(360) };
+static const struct builtin B1_vec_vcmpequw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 7, "vec_vcmpequw:1", "*vcmpequw", CODE_FOR_xfxx_simple, B_UID(361) };
+static const struct builtin B1_vec_vcmpequb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 7, "vec_vcmpequb:1", "*vcmpequb", CODE_FOR_xfxx_simple, B_UID(362) };
+static const struct builtin B2_vec_vcmpequh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 7, "vec_vcmpequh:2", "*vcmpequh", CODE_FOR_xfxx_simple, B_UID(363) };
+static const struct builtin B2_vec_vcmpequw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 7, "vec_vcmpequw:2", "*vcmpequw", CODE_FOR_xfxx_simple, B_UID(364) };
+static const struct builtin B2_vec_vcmpequb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 7, "vec_vcmpequb:2", "*vcmpequb", CODE_FOR_xfxx_simple, B_UID(365) };
+static const struct builtin B_vec_vcmpgefp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgefp", "*vcmpgefp", CODE_FOR_xfxx_simple, B_UID(366) };
+static const struct builtin B_vec_vcmpgtfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgtfp", "*vcmpgtfp", CODE_FOR_xfxx_simple, B_UID(367) };
+static const struct builtin B_vec_vcmpgtsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vcmpgtsh", "*vcmpgtsh", CODE_FOR_xfxx_simple, B_UID(368) };
+static const struct builtin B_vec_vcmpgtsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgtsw", "*vcmpgtsw", CODE_FOR_xfxx_simple, B_UID(369) };
+static const struct builtin B_vec_vcmpgtsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vcmpgtsb", "*vcmpgtsb", CODE_FOR_xfxx_simple, B_UID(370) };
+static const struct builtin B_vec_vcmpgtuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vcmpgtuh", "*vcmpgtuh", CODE_FOR_xfxx_simple, B_UID(371) };
+static const struct builtin B_vec_vcmpgtuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgtuw", "*vcmpgtuw", CODE_FOR_xfxx_simple, B_UID(372) };
+static const struct builtin B_vec_vcmpgtub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vcmpgtub", "*vcmpgtub", CODE_FOR_xfxx_simple, B_UID(373) };
+static const struct builtin B_vec_cmple = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmple", "*vcmpgefp", CODE_FOR_xfxx_simple, B_UID(374) };
+static const struct builtin B1_vec_cmplt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmplt:1", "*vcmpgtfp", CODE_FOR_xfxx_simple, B_UID(375) };
+static const struct builtin B2_vec_cmplt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 10, "vec_cmplt:2", "*vcmpgtsh", CODE_FOR_xfxx_simple, B_UID(376) };
+static const struct builtin B3_vec_cmplt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmplt:3", "*vcmpgtsw", CODE_FOR_xfxx_simple, B_UID(377) };
+static const struct builtin B4_vec_cmplt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 10, "vec_cmplt:4", "*vcmpgtsb", CODE_FOR_xfxx_simple, B_UID(378) };
+static const struct builtin B5_vec_cmplt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 10, "vec_cmplt:5", "*vcmpgtuh", CODE_FOR_xfxx_simple, B_UID(379) };
+static const struct builtin B6_vec_cmplt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmplt:6", "*vcmpgtuw", CODE_FOR_xfxx_simple, B_UID(380) };
+static const struct builtin B7_vec_cmplt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 10, "vec_cmplt:7", "*vcmpgtub", CODE_FOR_xfxx_simple, B_UID(381) };
+static const struct builtin B_vec_vcfsx = { { &T_vec_s32, &T_immed_u5, NULL, }, "xB", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vcfsx", "*vcfsx", CODE_FOR_xfxB_fp, B_UID(382) };
+static const struct builtin B_vec_vcfux = { { &T_vec_u32, &T_immed_u5, NULL, }, "xB", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vcfux", "*vcfux", CODE_FOR_xfxB_fp, B_UID(383) };
+static const struct builtin B_vec_vctsxs = { { &T_vec_f32, &T_immed_u5, NULL, }, "xB", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vctsxs", "*vctsxs", CODE_FOR_xfxB_fp, B_UID(384) };
+static const struct builtin B_vec_vctuxs = { { &T_vec_f32, &T_immed_u5, NULL, }, "xB", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vctuxs", "*vctuxs", CODE_FOR_xfxB_fp, B_UID(385) };
+static const struct builtin B_vec_dss = { { &T_immed_u2, NULL, NULL, }, "D", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_dss", "*dss", CODE_FOR_vlfD_load, B_UID(386) };
+static const struct builtin B_vec_dssall = { { NULL, NULL, NULL, }, "", &T_volatile_void, 0, FALSE, FALSE, 0, "vec_dssall", "*dssall", CODE_FOR_vlf_load, B_UID(387) };
+static const struct builtin B1_vec_dst = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:1", "*dst", CODE_FOR_vlfiiD_load, B_UID(388) };
+static const struct builtin B2_vec_dst = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:2", "*dst", CODE_FOR_vlfiiD_load, B_UID(389) };
+static const struct builtin B3_vec_dst = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:3", "*dst", CODE_FOR_vlfiiD_load, B_UID(390) };
+static const struct builtin B4_vec_dst = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:4", "*dst", CODE_FOR_vlfiiD_load, B_UID(391) };
+static const struct builtin B5_vec_dst = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:5", "*dst", CODE_FOR_vlfiiD_load, B_UID(392) };
+static const struct builtin B6_vec_dst = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:6", "*dst", CODE_FOR_vlfiiD_load, B_UID(393) };
+static const struct builtin B7_vec_dst = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:7", "*dst", CODE_FOR_vlfiiD_load, B_UID(394) };
+static const struct builtin B8_vec_dst = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:8", "*dst", CODE_FOR_vlfiiD_load, B_UID(395) };
+static const struct builtin B9_vec_dst = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:9", "*dst", CODE_FOR_vlfiiD_load, B_UID(396) };
+static const struct builtin B10_vec_dst = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:10", "*dst", CODE_FOR_vlfiiD_load, B_UID(397) };
+static const struct builtin B11_vec_dst = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:11", "*dst", CODE_FOR_vlfiiD_load, B_UID(398) };
+static const struct builtin B12_vec_dst = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:12", "*dst", CODE_FOR_vlfiiD_load, B_UID(399) };
+static const struct builtin B13_vec_dst = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:13", "*dst", CODE_FOR_vlfiiD_load, B_UID(400) };
+static const struct builtin B14_vec_dst = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:14", "*dst", CODE_FOR_vlfiiD_load, B_UID(401) };
+static const struct builtin B15_vec_dst = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:15", "*dst", CODE_FOR_vlfiiD_load, B_UID(402) };
+static const struct builtin B16_vec_dst = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:16", "*dst", CODE_FOR_vlfiiD_load, B_UID(403) };
+static const struct builtin B17_vec_dst = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:17", "*dst", CODE_FOR_vlfiiD_load, B_UID(404) };
+static const struct builtin B18_vec_dst = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:18", "*dst", CODE_FOR_vlfiiD_load, B_UID(405) };
+static const struct builtin B19_vec_dst = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:19", "*dst", CODE_FOR_vlfiiD_load, B_UID(406) };
+static const struct builtin B20_vec_dst = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:20", "*dst", CODE_FOR_vlfiiD_load, B_UID(407) };
+static const struct builtin B1_vec_dstst = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:1", "*dstst", CODE_FOR_vlfiiD_load, B_UID(408) };
+static const struct builtin B2_vec_dstst = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:2", "*dstst", CODE_FOR_vlfiiD_load, B_UID(409) };
+static const struct builtin B3_vec_dstst = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:3", "*dstst", CODE_FOR_vlfiiD_load, B_UID(410) };
+static const struct builtin B4_vec_dstst = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:4", "*dstst", CODE_FOR_vlfiiD_load, B_UID(411) };
+static const struct builtin B5_vec_dstst = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:5", "*dstst", CODE_FOR_vlfiiD_load, B_UID(412) };
+static const struct builtin B6_vec_dstst = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:6", "*dstst", CODE_FOR_vlfiiD_load, B_UID(413) };
+static const struct builtin B7_vec_dstst = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:7", "*dstst", CODE_FOR_vlfiiD_load, B_UID(414) };
+static const struct builtin B8_vec_dstst = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:8", "*dstst", CODE_FOR_vlfiiD_load, B_UID(415) };
+static const struct builtin B9_vec_dstst = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:9", "*dstst", CODE_FOR_vlfiiD_load, B_UID(416) };
+static const struct builtin B10_vec_dstst = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:10", "*dstst", CODE_FOR_vlfiiD_load, B_UID(417) };
+static const struct builtin B11_vec_dstst = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:11", "*dstst", CODE_FOR_vlfiiD_load, B_UID(418) };
+static const struct builtin B12_vec_dstst = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:12", "*dstst", CODE_FOR_vlfiiD_load, B_UID(419) };
+static const struct builtin B13_vec_dstst = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:13", "*dstst", CODE_FOR_vlfiiD_load, B_UID(420) };
+static const struct builtin B14_vec_dstst = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:14", "*dstst", CODE_FOR_vlfiiD_load, B_UID(421) };
+static const struct builtin B15_vec_dstst = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:15", "*dstst", CODE_FOR_vlfiiD_load, B_UID(422) };
+static const struct builtin B16_vec_dstst = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:16", "*dstst", CODE_FOR_vlfiiD_load, B_UID(423) };
+static const struct builtin B17_vec_dstst = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:17", "*dstst", CODE_FOR_vlfiiD_load, B_UID(424) };
+static const struct builtin B18_vec_dstst = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:18", "*dstst", CODE_FOR_vlfiiD_load, B_UID(425) };
+static const struct builtin B19_vec_dstst = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:19", "*dstst", CODE_FOR_vlfiiD_load, B_UID(426) };
+static const struct builtin B20_vec_dstst = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:20", "*dstst", CODE_FOR_vlfiiD_load, B_UID(427) };
+static const struct builtin B1_vec_dststt = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:1", "*dststt", CODE_FOR_vlfiiD_load, B_UID(428) };
+static const struct builtin B2_vec_dststt = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:2", "*dststt", CODE_FOR_vlfiiD_load, B_UID(429) };
+static const struct builtin B3_vec_dststt = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:3", "*dststt", CODE_FOR_vlfiiD_load, B_UID(430) };
+static const struct builtin B4_vec_dststt = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:4", "*dststt", CODE_FOR_vlfiiD_load, B_UID(431) };
+static const struct builtin B5_vec_dststt = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:5", "*dststt", CODE_FOR_vlfiiD_load, B_UID(432) };
+static const struct builtin B6_vec_dststt = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:6", "*dststt", CODE_FOR_vlfiiD_load, B_UID(433) };
+static const struct builtin B7_vec_dststt = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:7", "*dststt", CODE_FOR_vlfiiD_load, B_UID(434) };
+static const struct builtin B8_vec_dststt = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:8", "*dststt", CODE_FOR_vlfiiD_load, B_UID(435) };
+static const struct builtin B9_vec_dststt = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:9", "*dststt", CODE_FOR_vlfiiD_load, B_UID(436) };
+static const struct builtin B10_vec_dststt = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:10", "*dststt", CODE_FOR_vlfiiD_load, B_UID(437) };
+static const struct builtin B11_vec_dststt = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:11", "*dststt", CODE_FOR_vlfiiD_load, B_UID(438) };
+static const struct builtin B12_vec_dststt = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:12", "*dststt", CODE_FOR_vlfiiD_load, B_UID(439) };
+static const struct builtin B13_vec_dststt = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:13", "*dststt", CODE_FOR_vlfiiD_load, B_UID(440) };
+static const struct builtin B14_vec_dststt = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:14", "*dststt", CODE_FOR_vlfiiD_load, B_UID(441) };
+static const struct builtin B15_vec_dststt = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:15", "*dststt", CODE_FOR_vlfiiD_load, B_UID(442) };
+static const struct builtin B16_vec_dststt = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:16", "*dststt", CODE_FOR_vlfiiD_load, B_UID(443) };
+static const struct builtin B17_vec_dststt = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:17", "*dststt", CODE_FOR_vlfiiD_load, B_UID(444) };
+static const struct builtin B18_vec_dststt = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:18", "*dststt", CODE_FOR_vlfiiD_load, B_UID(445) };
+static const struct builtin B19_vec_dststt = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:19", "*dststt", CODE_FOR_vlfiiD_load, B_UID(446) };
+static const struct builtin B20_vec_dststt = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:20", "*dststt", CODE_FOR_vlfiiD_load, B_UID(447) };
+static const struct builtin B1_vec_dstt = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:1", "*dstt", CODE_FOR_vlfiiD_load, B_UID(448) };
+static const struct builtin B2_vec_dstt = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:2", "*dstt", CODE_FOR_vlfiiD_load, B_UID(449) };
+static const struct builtin B3_vec_dstt = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:3", "*dstt", CODE_FOR_vlfiiD_load, B_UID(450) };
+static const struct builtin B4_vec_dstt = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:4", "*dstt", CODE_FOR_vlfiiD_load, B_UID(451) };
+static const struct builtin B5_vec_dstt = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:5", "*dstt", CODE_FOR_vlfiiD_load, B_UID(452) };
+static const struct builtin B6_vec_dstt = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:6", "*dstt", CODE_FOR_vlfiiD_load, B_UID(453) };
+static const struct builtin B7_vec_dstt = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:7", "*dstt", CODE_FOR_vlfiiD_load, B_UID(454) };
+static const struct builtin B8_vec_dstt = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:8", "*dstt", CODE_FOR_vlfiiD_load, B_UID(455) };
+static const struct builtin B9_vec_dstt = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:9", "*dstt", CODE_FOR_vlfiiD_load, B_UID(456) };
+static const struct builtin B10_vec_dstt = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:10", "*dstt", CODE_FOR_vlfiiD_load, B_UID(457) };
+static const struct builtin B11_vec_dstt = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:11", "*dstt", CODE_FOR_vlfiiD_load, B_UID(458) };
+static const struct builtin B12_vec_dstt = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:12", "*dstt", CODE_FOR_vlfiiD_load, B_UID(459) };
+static const struct builtin B13_vec_dstt = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:13", "*dstt", CODE_FOR_vlfiiD_load, B_UID(460) };
+static const struct builtin B14_vec_dstt = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:14", "*dstt", CODE_FOR_vlfiiD_load, B_UID(461) };
+static const struct builtin B15_vec_dstt = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:15", "*dstt", CODE_FOR_vlfiiD_load, B_UID(462) };
+static const struct builtin B16_vec_dstt = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:16", "*dstt", CODE_FOR_vlfiiD_load, B_UID(463) };
+static const struct builtin B17_vec_dstt = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:17", "*dstt", CODE_FOR_vlfiiD_load, B_UID(464) };
+static const struct builtin B18_vec_dstt = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:18", "*dstt", CODE_FOR_vlfiiD_load, B_UID(465) };
+static const struct builtin B19_vec_dstt = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:19", "*dstt", CODE_FOR_vlfiiD_load, B_UID(466) };
+static const struct builtin B20_vec_dstt = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:20", "*dstt", CODE_FOR_vlfiiD_load, B_UID(467) };
+static const struct builtin B_vec_vexptefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vexptefp", "*vexptefp", CODE_FOR_xfx_fp, B_UID(468) };
+static const struct builtin B_vec_vrfim = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfim", "*vrfim", CODE_FOR_xfx_fp, B_UID(469) };
+static const struct builtin B1_vec_lvx = { { &T_int, &T_const_float_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvx:1", "*lvx", CODE_FOR_xlfii_load, B_UID(470) };
+static const struct builtin B2_vec_lvx = { { &T_int, &T_const_int_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvx:2", "*lvx", CODE_FOR_xlfii_load, B_UID(471) };
+static const struct builtin B3_vec_lvx = { { &T_int, &T_const_long_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvx:3", "*lvx", CODE_FOR_xlfii_load, B_UID(472) };
+static const struct builtin B4_vec_lvx = { { &T_int, &T_const_short_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvx:4", "*lvx", CODE_FOR_xlfii_load, B_UID(473) };
+static const struct builtin B5_vec_lvx = { { &T_int, &T_const_signed_char_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvx:5", "*lvx", CODE_FOR_xlfii_load, B_UID(474) };
+static const struct builtin B6_vec_lvx = { { &T_int, &T_const_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvx:6", "*lvx", CODE_FOR_xlfii_load, B_UID(475) };
+static const struct builtin B7_vec_lvx = { { &T_int, &T_const_unsigned_int_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvx:7", "*lvx", CODE_FOR_xlfii_load, B_UID(476) };
+static const struct builtin B8_vec_lvx = { { &T_int, &T_const_unsigned_long_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvx:8", "*lvx", CODE_FOR_xlfii_load, B_UID(477) };
+static const struct builtin B9_vec_lvx = { { &T_int, &T_const_unsigned_short_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvx:9", "*lvx", CODE_FOR_xlfii_load, B_UID(478) };
+static const struct builtin B10_vec_lvx = { { &T_int, &T_const_vec_b16_ptr, NULL, }, "ii", &T_vec_b16, 2, TRUE, FALSE, 0, "vec_lvx:10", "*lvx", CODE_FOR_xlfii_load, B_UID(479) };
+static const struct builtin B11_vec_lvx = { { &T_int, &T_const_vec_b32_ptr, NULL, }, "ii", &T_vec_b32, 2, TRUE, FALSE, 0, "vec_lvx:11", "*lvx", CODE_FOR_xlfii_load, B_UID(480) };
+static const struct builtin B12_vec_lvx = { { &T_int, &T_const_vec_b8_ptr, NULL, }, "ii", &T_vec_b8, 2, TRUE, FALSE, 0, "vec_lvx:12", "*lvx", CODE_FOR_xlfii_load, B_UID(481) };
+static const struct builtin B13_vec_lvx = { { &T_int, &T_const_vec_f32_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvx:13", "*lvx", CODE_FOR_xlfii_load, B_UID(482) };
+static const struct builtin B14_vec_lvx = { { &T_int, &T_const_vec_p16_ptr, NULL, }, "ii", &T_vec_p16, 2, TRUE, FALSE, 0, "vec_lvx:14", "*lvx", CODE_FOR_xlfii_load, B_UID(483) };
+static const struct builtin B15_vec_lvx = { { &T_int, &T_const_vec_s16_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvx:15", "*lvx", CODE_FOR_xlfii_load, B_UID(484) };
+static const struct builtin B16_vec_lvx = { { &T_int, &T_const_vec_s32_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvx:16", "*lvx", CODE_FOR_xlfii_load, B_UID(485) };
+static const struct builtin B17_vec_lvx = { { &T_int, &T_const_vec_s8_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvx:17", "*lvx", CODE_FOR_xlfii_load, B_UID(486) };
+static const struct builtin B18_vec_lvx = { { &T_int, &T_const_vec_u16_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvx:18", "*lvx", CODE_FOR_xlfii_load, B_UID(487) };
+static const struct builtin B19_vec_lvx = { { &T_int, &T_const_vec_u32_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvx:19", "*lvx", CODE_FOR_xlfii_load, B_UID(488) };
+static const struct builtin B20_vec_lvx = { { &T_int, &T_const_vec_u8_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvx:20", "*lvx", CODE_FOR_xlfii_load, B_UID(489) };
+static const struct builtin B1_vec_lvewx = { { &T_int, &T_const_float_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvewx:1", "*lvewx", CODE_FOR_xlfii_load, B_UID(490) };
+static const struct builtin B2_vec_lvewx = { { &T_int, &T_const_int_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvewx:2", "*lvewx", CODE_FOR_xlfii_load, B_UID(491) };
+static const struct builtin B3_vec_lvewx = { { &T_int, &T_const_long_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvewx:3", "*lvewx", CODE_FOR_xlfii_load, B_UID(492) };
+static const struct builtin B1_vec_lvehx = { { &T_int, &T_const_short_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvehx:1", "*lvehx", CODE_FOR_xlfii_load, B_UID(493) };
+static const struct builtin B1_vec_lvebx = { { &T_int, &T_const_signed_char_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvebx:1", "*lvebx", CODE_FOR_xlfii_load, B_UID(494) };
+static const struct builtin B2_vec_lvebx = { { &T_int, &T_const_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvebx:2", "*lvebx", CODE_FOR_xlfii_load, B_UID(495) };
+static const struct builtin B4_vec_lvewx = { { &T_int, &T_const_unsigned_int_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvewx:4", "*lvewx", CODE_FOR_xlfii_load, B_UID(496) };
+static const struct builtin B5_vec_lvewx = { { &T_int, &T_const_unsigned_long_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvewx:5", "*lvewx", CODE_FOR_xlfii_load, B_UID(497) };
+static const struct builtin B2_vec_lvehx = { { &T_int, &T_const_unsigned_short_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvehx:2", "*lvehx", CODE_FOR_xlfii_load, B_UID(498) };
+static const struct builtin B1_vec_lvxl = { { &T_int, &T_const_float_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvxl:1", "*lvxl", CODE_FOR_xlfii_load, B_UID(499) };
+static const struct builtin B2_vec_lvxl = { { &T_int, &T_const_int_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvxl:2", "*lvxl", CODE_FOR_xlfii_load, B_UID(500) };
+static const struct builtin B3_vec_lvxl = { { &T_int, &T_const_long_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvxl:3", "*lvxl", CODE_FOR_xlfii_load, B_UID(501) };
+static const struct builtin B4_vec_lvxl = { { &T_int, &T_const_short_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvxl:4", "*lvxl", CODE_FOR_xlfii_load, B_UID(502) };
+static const struct builtin B5_vec_lvxl = { { &T_int, &T_const_signed_char_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvxl:5", "*lvxl", CODE_FOR_xlfii_load, B_UID(503) };
+static const struct builtin B6_vec_lvxl = { { &T_int, &T_const_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvxl:6", "*lvxl", CODE_FOR_xlfii_load, B_UID(504) };
+static const struct builtin B7_vec_lvxl = { { &T_int, &T_const_unsigned_int_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvxl:7", "*lvxl", CODE_FOR_xlfii_load, B_UID(505) };
+static const struct builtin B8_vec_lvxl = { { &T_int, &T_const_unsigned_long_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvxl:8", "*lvxl", CODE_FOR_xlfii_load, B_UID(506) };
+static const struct builtin B9_vec_lvxl = { { &T_int, &T_const_unsigned_short_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvxl:9", "*lvxl", CODE_FOR_xlfii_load, B_UID(507) };
+static const struct builtin B10_vec_lvxl = { { &T_int, &T_const_vec_b16_ptr, NULL, }, "ii", &T_vec_b16, 2, TRUE, FALSE, 0, "vec_lvxl:10", "*lvxl", CODE_FOR_xlfii_load, B_UID(508) };
+static const struct builtin B11_vec_lvxl = { { &T_int, &T_const_vec_b32_ptr, NULL, }, "ii", &T_vec_b32, 2, TRUE, FALSE, 0, "vec_lvxl:11", "*lvxl", CODE_FOR_xlfii_load, B_UID(509) };
+static const struct builtin B12_vec_lvxl = { { &T_int, &T_const_vec_b8_ptr, NULL, }, "ii", &T_vec_b8, 2, TRUE, FALSE, 0, "vec_lvxl:12", "*lvxl", CODE_FOR_xlfii_load, B_UID(510) };
+static const struct builtin B13_vec_lvxl = { { &T_int, &T_const_vec_f32_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvxl:13", "*lvxl", CODE_FOR_xlfii_load, B_UID(511) };
+static const struct builtin B14_vec_lvxl = { { &T_int, &T_const_vec_p16_ptr, NULL, }, "ii", &T_vec_p16, 2, TRUE, FALSE, 0, "vec_lvxl:14", "*lvxl", CODE_FOR_xlfii_load, B_UID(512) };
+static const struct builtin B15_vec_lvxl = { { &T_int, &T_const_vec_s16_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvxl:15", "*lvxl", CODE_FOR_xlfii_load, B_UID(513) };
+static const struct builtin B16_vec_lvxl = { { &T_int, &T_const_vec_s32_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvxl:16", "*lvxl", CODE_FOR_xlfii_load, B_UID(514) };
+static const struct builtin B17_vec_lvxl = { { &T_int, &T_const_vec_s8_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvxl:17", "*lvxl", CODE_FOR_xlfii_load, B_UID(515) };
+static const struct builtin B18_vec_lvxl = { { &T_int, &T_const_vec_u16_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvxl:18", "*lvxl", CODE_FOR_xlfii_load, B_UID(516) };
+static const struct builtin B19_vec_lvxl = { { &T_int, &T_const_vec_u32_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvxl:19", "*lvxl", CODE_FOR_xlfii_load, B_UID(517) };
+static const struct builtin B20_vec_lvxl = { { &T_int, &T_const_vec_u8_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvxl:20", "*lvxl", CODE_FOR_xlfii_load, B_UID(518) };
+static const struct builtin B_vec_vlogefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vlogefp", "*vlogefp", CODE_FOR_xfx_fp, B_UID(519) };
+static const struct builtin B1_vec_lvsl = { { &T_int, &T_const_volatile_float_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:1", "*lvsl", CODE_FOR_xfii_load, B_UID(520) };
+static const struct builtin B2_vec_lvsl = { { &T_int, &T_const_volatile_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:2", "*lvsl", CODE_FOR_xfii_load, B_UID(521) };
+static const struct builtin B3_vec_lvsl = { { &T_int, &T_const_volatile_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:3", "*lvsl", CODE_FOR_xfii_load, B_UID(522) };
+static const struct builtin B4_vec_lvsl = { { &T_int, &T_const_volatile_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:4", "*lvsl", CODE_FOR_xfii_load, B_UID(523) };
+static const struct builtin B5_vec_lvsl = { { &T_int, &T_const_volatile_signed_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:5", "*lvsl", CODE_FOR_xfii_load, B_UID(524) };
+static const struct builtin B6_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:6", "*lvsl", CODE_FOR_xfii_load, B_UID(525) };
+static const struct builtin B7_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:7", "*lvsl", CODE_FOR_xfii_load, B_UID(526) };
+static const struct builtin B8_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:8", "*lvsl", CODE_FOR_xfii_load, B_UID(527) };
+static const struct builtin B9_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:9", "*lvsl", CODE_FOR_xfii_load, B_UID(528) };
+static const struct builtin B1_vec_lvsr = { { &T_int, &T_const_volatile_float_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:1", "*lvsr", CODE_FOR_xfii_load, B_UID(529) };
+static const struct builtin B2_vec_lvsr = { { &T_int, &T_const_volatile_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:2", "*lvsr", CODE_FOR_xfii_load, B_UID(530) };
+static const struct builtin B3_vec_lvsr = { { &T_int, &T_const_volatile_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:3", "*lvsr", CODE_FOR_xfii_load, B_UID(531) };
+static const struct builtin B4_vec_lvsr = { { &T_int, &T_const_volatile_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:4", "*lvsr", CODE_FOR_xfii_load, B_UID(532) };
+static const struct builtin B5_vec_lvsr = { { &T_int, &T_const_volatile_signed_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:5", "*lvsr", CODE_FOR_xfii_load, B_UID(533) };
+static const struct builtin B6_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:6", "*lvsr", CODE_FOR_xfii_load, B_UID(534) };
+static const struct builtin B7_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:7", "*lvsr", CODE_FOR_xfii_load, B_UID(535) };
+static const struct builtin B8_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:8", "*lvsr", CODE_FOR_xfii_load, B_UID(536) };
+static const struct builtin B9_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:9", "*lvsr", CODE_FOR_xfii_load, B_UID(537) };
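+/* The vec_lvsl/vec_lvsr rows return the permutation control vector for
+   an unaligned address; combined with two aligned loads and vec_perm
+   they realize a misaligned vector load.  A typical use, shown here
+   only as an illustrative sketch (it is not part of this table):
+
+     vector unsigned char msq  = vec_ld(0, p);        // high quadword
+     vector unsigned char lsq  = vec_ld(15, p);       // low quadword
+     vector unsigned char mask = vec_lvsl(0, p);      // alignment mask
+     vector unsigned char v    = vec_perm(msq, lsq, mask);
+  */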
+static const struct builtin B_vec_vmaddfp = { { &T_vec_f32, &T_vec_f32, &T_vec_f32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vmaddfp", "*vmaddfp", CODE_FOR_xfxxx_fp, B_UID(538) };
+static const struct builtin B_vec_vmhaddshs = { { &T_vec_s16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmhaddshs", "*vmhaddshs", CODE_FOR_xfxxx_complex, B_UID(539) };
+static const struct builtin B1_vec_vmaxsh = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vmaxsh:1", "*vmaxsh", CODE_FOR_xfxx_simple, B_UID(540) };
+static const struct builtin B1_vec_vmaxuh = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vmaxuh:1", "*vmaxuh", CODE_FOR_xfxx_simple, B_UID(541) };
+static const struct builtin B1_vec_vmaxsw = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vmaxsw:1", "*vmaxsw", CODE_FOR_xfxx_simple, B_UID(542) };
+static const struct builtin B1_vec_vmaxuw = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vmaxuw:1", "*vmaxuw", CODE_FOR_xfxx_simple, B_UID(543) };
+static const struct builtin B1_vec_vmaxsb = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vmaxsb:1", "*vmaxsb", CODE_FOR_xfxx_simple, B_UID(544) };
+static const struct builtin B1_vec_vmaxub = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vmaxub:1", "*vmaxub", CODE_FOR_xfxx_simple, B_UID(545) };
+static const struct builtin B_vec_vmaxfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vmaxfp", "*vmaxfp", CODE_FOR_xfxx_simple, B_UID(546) };
+static const struct builtin B2_vec_vmaxsh = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vmaxsh:2", "*vmaxsh", CODE_FOR_xfxx_simple, B_UID(547) };
+static const struct builtin B3_vec_vmaxsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vmaxsh:3", "*vmaxsh", CODE_FOR_xfxx_simple, B_UID(548) };
+static const struct builtin B2_vec_vmaxsw = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vmaxsw:2", "*vmaxsw", CODE_FOR_xfxx_simple, B_UID(549) };
+static const struct builtin B3_vec_vmaxsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vmaxsw:3", "*vmaxsw", CODE_FOR_xfxx_simple, B_UID(550) };
+static const struct builtin B2_vec_vmaxsb = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vmaxsb:2", "*vmaxsb", CODE_FOR_xfxx_simple, B_UID(551) };
+static const struct builtin B3_vec_vmaxsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vmaxsb:3", "*vmaxsb", CODE_FOR_xfxx_simple, B_UID(552) };
+static const struct builtin B2_vec_vmaxuh = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vmaxuh:2", "*vmaxuh", CODE_FOR_xfxx_simple, B_UID(553) };
+static const struct builtin B3_vec_vmaxuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vmaxuh:3", "*vmaxuh", CODE_FOR_xfxx_simple, B_UID(554) };
+static const struct builtin B2_vec_vmaxuw = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vmaxuw:2", "*vmaxuw", CODE_FOR_xfxx_simple, B_UID(555) };
+static const struct builtin B3_vec_vmaxuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vmaxuw:3", "*vmaxuw", CODE_FOR_xfxx_simple, B_UID(556) };
+static const struct builtin B2_vec_vmaxub = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vmaxub:2", "*vmaxub", CODE_FOR_xfxx_simple, B_UID(557) };
+static const struct builtin B3_vec_vmaxub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vmaxub:3", "*vmaxub", CODE_FOR_xfxx_simple, B_UID(558) };
+static const struct builtin B1_vec_vmrghh = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vmrghh:1", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(559) };
+static const struct builtin B1_vec_vmrghw = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vmrghw:1", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(560) };
+static const struct builtin B1_vec_vmrghb = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vmrghb:1", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(561) };
+static const struct builtin B2_vec_vmrghw = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vmrghw:2", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(562) };
+static const struct builtin B2_vec_vmrghh = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vmrghh:2", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(563) };
+static const struct builtin B3_vec_vmrghh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmrghh:3", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(564) };
+static const struct builtin B3_vec_vmrghw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmrghw:3", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(565) };
+static const struct builtin B2_vec_vmrghb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vmrghb:2", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(566) };
+static const struct builtin B4_vec_vmrghh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmrghh:4", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(567) };
+static const struct builtin B4_vec_vmrghw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmrghw:4", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(568) };
+static const struct builtin B3_vec_vmrghb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vmrghb:3", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(569) };
+static const struct builtin B1_vec_vmrglh = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vmrglh:1", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(570) };
+static const struct builtin B1_vec_vmrglw = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vmrglw:1", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(571) };
+static const struct builtin B1_vec_vmrglb = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vmrglb:1", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(572) };
+static const struct builtin B2_vec_vmrglw = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vmrglw:2", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(573) };
+static const struct builtin B2_vec_vmrglh = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vmrglh:2", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(574) };
+static const struct builtin B3_vec_vmrglh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmrglh:3", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(575) };
+static const struct builtin B3_vec_vmrglw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmrglw:3", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(576) };
+static const struct builtin B2_vec_vmrglb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vmrglb:2", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(577) };
+static const struct builtin B4_vec_vmrglh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmrglh:4", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(578) };
+static const struct builtin B4_vec_vmrglw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmrglw:4", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(579) };
+static const struct builtin B3_vec_vmrglb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vmrglb:3", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(580) };
+static const struct builtin B_vec_mfvscr = { { NULL, NULL, NULL, }, "", &T_volatile_vec_u16, 0, FALSE, FALSE, 0, "vec_mfvscr", "*mfvscr", CODE_FOR_vxf_fxu, B_UID(581) };
+static const struct builtin B1_vec_vminsh = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vminsh:1", "*vminsh", CODE_FOR_xfxx_simple, B_UID(582) };
+static const struct builtin B1_vec_vminuh = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vminuh:1", "*vminuh", CODE_FOR_xfxx_simple, B_UID(583) };
+static const struct builtin B1_vec_vminsw = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vminsw:1", "*vminsw", CODE_FOR_xfxx_simple, B_UID(584) };
+static const struct builtin B1_vec_vminuw = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vminuw:1", "*vminuw", CODE_FOR_xfxx_simple, B_UID(585) };
+static const struct builtin B1_vec_vminsb = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vminsb:1", "*vminsb", CODE_FOR_xfxx_simple, B_UID(586) };
+static const struct builtin B1_vec_vminub = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vminub:1", "*vminub", CODE_FOR_xfxx_simple, B_UID(587) };
+static const struct builtin B_vec_vminfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vminfp", "*vminfp", CODE_FOR_xfxx_simple, B_UID(588) };
+static const struct builtin B2_vec_vminsh = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vminsh:2", "*vminsh", CODE_FOR_xfxx_simple, B_UID(589) };
+static const struct builtin B3_vec_vminsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vminsh:3", "*vminsh", CODE_FOR_xfxx_simple, B_UID(590) };
+static const struct builtin B2_vec_vminsw = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vminsw:2", "*vminsw", CODE_FOR_xfxx_simple, B_UID(591) };
+static const struct builtin B3_vec_vminsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vminsw:3", "*vminsw", CODE_FOR_xfxx_simple, B_UID(592) };
+static const struct builtin B2_vec_vminsb = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vminsb:2", "*vminsb", CODE_FOR_xfxx_simple, B_UID(593) };
+static const struct builtin B3_vec_vminsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vminsb:3", "*vminsb", CODE_FOR_xfxx_simple, B_UID(594) };
+static const struct builtin B2_vec_vminuh = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vminuh:2", "*vminuh", CODE_FOR_xfxx_simple, B_UID(595) };
+static const struct builtin B3_vec_vminuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vminuh:3", "*vminuh", CODE_FOR_xfxx_simple, B_UID(596) };
+static const struct builtin B2_vec_vminuw = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vminuw:2", "*vminuw", CODE_FOR_xfxx_simple, B_UID(597) };
+static const struct builtin B3_vec_vminuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vminuw:3", "*vminuw", CODE_FOR_xfxx_simple, B_UID(598) };
+static const struct builtin B2_vec_vminub = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vminub:2", "*vminub", CODE_FOR_xfxx_simple, B_UID(599) };
+static const struct builtin B3_vec_vminub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vminub:3", "*vminub", CODE_FOR_xfxx_simple, B_UID(600) };
+static const struct builtin B1_vec_vmladduhm = { { &T_vec_s16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmladduhm:1", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(601) };
+static const struct builtin B2_vec_vmladduhm = { { &T_vec_s16, &T_vec_u16, &T_vec_u16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmladduhm:2", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(602) };
+static const struct builtin B3_vec_vmladduhm = { { &T_vec_u16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmladduhm:3", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(603) };
+static const struct builtin B4_vec_vmladduhm = { { &T_vec_u16, &T_vec_u16, &T_vec_u16, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vmladduhm:4", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(604) };
+static const struct builtin B_vec_vmhraddshs = { { &T_vec_s16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmhraddshs", "*vmhraddshs", CODE_FOR_xfxxx_complex, B_UID(605) };
+static const struct builtin B_vec_vmsumshm = { { &T_vec_s16, &T_vec_s16, &T_vec_s32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vmsumshm", "*vmsumshm", CODE_FOR_xfxxx_complex, B_UID(606) };
+static const struct builtin B_vec_vmsummbm = { { &T_vec_s8, &T_vec_u8, &T_vec_s32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vmsummbm", "*vmsummbm", CODE_FOR_xfxxx_complex, B_UID(607) };
+static const struct builtin B_vec_vmsumuhm = { { &T_vec_u16, &T_vec_u16, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vmsumuhm", "*vmsumuhm", CODE_FOR_xfxxx_complex, B_UID(608) };
+static const struct builtin B_vec_vmsumubm = { { &T_vec_u8, &T_vec_u8, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vmsumubm", "*vmsumubm", CODE_FOR_xfxxx_complex, B_UID(609) };
+static const struct builtin B_vec_vmsumshs = { { &T_vec_s16, &T_vec_s16, &T_vec_s32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vmsumshs", "*vmsumshs", CODE_FOR_xfxxx_complex, B_UID(610) };
+static const struct builtin B_vec_vmsumuhs = { { &T_vec_u16, &T_vec_u16, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vmsumuhs", "*vmsumuhs", CODE_FOR_xfxxx_complex, B_UID(611) };
+static const struct builtin B1_vec_mtvscr = { { &T_vec_b16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:1", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(612) };
+static const struct builtin B2_vec_mtvscr = { { &T_vec_b32, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:2", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(613) };
+static const struct builtin B3_vec_mtvscr = { { &T_vec_b8, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:3", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(614) };
+static const struct builtin B4_vec_mtvscr = { { &T_vec_p16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:4", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(615) };
+static const struct builtin B5_vec_mtvscr = { { &T_vec_s16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:5", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(616) };
+static const struct builtin B6_vec_mtvscr = { { &T_vec_s32, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:6", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(617) };
+static const struct builtin B7_vec_mtvscr = { { &T_vec_s8, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:7", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(618) };
+static const struct builtin B8_vec_mtvscr = { { &T_vec_u16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:8", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(619) };
+static const struct builtin B9_vec_mtvscr = { { &T_vec_u32, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:9", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(620) };
+static const struct builtin B10_vec_mtvscr = { { &T_vec_u8, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:10", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(621) };
+static const struct builtin B_vec_vmulesh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmulesh", "*vmulesh", CODE_FOR_xfxx_complex, B_UID(622) };
+static const struct builtin B_vec_vmulesb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmulesb", "*vmulesb", CODE_FOR_xfxx_complex, B_UID(623) };
+static const struct builtin B_vec_vmuleuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmuleuh", "*vmuleuh", CODE_FOR_xfxx_complex, B_UID(624) };
+static const struct builtin B_vec_vmuleub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmuleub", "*vmuleub", CODE_FOR_xfxx_complex, B_UID(625) };
+static const struct builtin B_vec_vmulosh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmulosh", "*vmulosh", CODE_FOR_xfxx_complex, B_UID(626) };
+static const struct builtin B_vec_vmulosb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmulosb", "*vmulosb", CODE_FOR_xfxx_complex, B_UID(627) };
+static const struct builtin B_vec_vmulouh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmulouh", "*vmulouh", CODE_FOR_xfxx_complex, B_UID(628) };
+static const struct builtin B_vec_vmuloub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmuloub", "*vmuloub", CODE_FOR_xfxx_complex, B_UID(629) };
+static const struct builtin B_vec_vnmsubfp = { { &T_vec_f32, &T_vec_f32, &T_vec_f32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vnmsubfp", "*vnmsubfp", CODE_FOR_xfxxx_fp, B_UID(630) };
+static const struct builtin B1_vec_vnor = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vnor:1", "*vnor", CODE_FOR_xfxx_simple, B_UID(631) };
+static const struct builtin B2_vec_vnor = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vnor:2", "*vnor", CODE_FOR_xfxx_simple, B_UID(632) };
+static const struct builtin B3_vec_vnor = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vnor:3", "*vnor", CODE_FOR_xfxx_simple, B_UID(633) };
+static const struct builtin B4_vec_vnor = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vnor:4", "*vnor", CODE_FOR_xfxx_simple, B_UID(634) };
+static const struct builtin B5_vec_vnor = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vnor:5", "*vnor", CODE_FOR_xfxx_simple, B_UID(635) };
+static const struct builtin B6_vec_vnor = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vnor:6", "*vnor", CODE_FOR_xfxx_simple, B_UID(636) };
+static const struct builtin B7_vec_vnor = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vnor:7", "*vnor", CODE_FOR_xfxx_simple, B_UID(637) };
+static const struct builtin B8_vec_vnor = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vnor:8", "*vnor", CODE_FOR_xfxx_simple, B_UID(638) };
+static const struct builtin B9_vec_vnor = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vnor:9", "*vnor", CODE_FOR_xfxx_simple, B_UID(639) };
+static const struct builtin B10_vec_vnor = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vnor:10", "*vnor", CODE_FOR_xfxx_simple, B_UID(640) };
+static const struct builtin B1_vec_vor = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 2, "vec_vor:1", "*vor", CODE_FOR_xfxx_simple, B_UID(641) };
+static const struct builtin B2_vec_vor = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vor:2", "*vor", CODE_FOR_xfxx_simple, B_UID(642) };
+static const struct builtin B3_vec_vor = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vor:3", "*vor", CODE_FOR_xfxx_simple, B_UID(643) };
+static const struct builtin B4_vec_vor = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 2, "vec_vor:4", "*vor", CODE_FOR_xfxx_simple, B_UID(644) };
+static const struct builtin B5_vec_vor = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vor:5", "*vor", CODE_FOR_xfxx_simple, B_UID(645) };
+static const struct builtin B6_vec_vor = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vor:6", "*vor", CODE_FOR_xfxx_simple, B_UID(646) };
+static const struct builtin B7_vec_vor = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vor:7", "*vor", CODE_FOR_xfxx_simple, B_UID(647) };
+static const struct builtin B8_vec_vor = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 2, "vec_vor:8", "*vor", CODE_FOR_xfxx_simple, B_UID(648) };
+static const struct builtin B9_vec_vor = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vor:9", "*vor", CODE_FOR_xfxx_simple, B_UID(649) };
+static const struct builtin B10_vec_vor = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vor:10", "*vor", CODE_FOR_xfxx_simple, B_UID(650) };
+static const struct builtin B11_vec_vor = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vor:11", "*vor", CODE_FOR_xfxx_simple, B_UID(651) };
+static const struct builtin B12_vec_vor = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vor:12", "*vor", CODE_FOR_xfxx_simple, B_UID(652) };
+static const struct builtin B13_vec_vor = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vor:13", "*vor", CODE_FOR_xfxx_simple, B_UID(653) };
+static const struct builtin B14_vec_vor = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vor:14", "*vor", CODE_FOR_xfxx_simple, B_UID(654) };
+static const struct builtin B15_vec_vor = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vor:15", "*vor", CODE_FOR_xfxx_simple, B_UID(655) };
+static const struct builtin B16_vec_vor = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vor:16", "*vor", CODE_FOR_xfxx_simple, B_UID(656) };
+static const struct builtin B17_vec_vor = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vor:17", "*vor", CODE_FOR_xfxx_simple, B_UID(657) };
+static const struct builtin B18_vec_vor = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vor:18", "*vor", CODE_FOR_xfxx_simple, B_UID(658) };
+static const struct builtin B19_vec_vor = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vor:19", "*vor", CODE_FOR_xfxx_simple, B_UID(659) };
+static const struct builtin B20_vec_vor = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vor:20", "*vor", CODE_FOR_xfxx_simple, B_UID(660) };
+static const struct builtin B21_vec_vor = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vor:21", "*vor", CODE_FOR_xfxx_simple, B_UID(661) };
+static const struct builtin B22_vec_vor = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vor:22", "*vor", CODE_FOR_xfxx_simple, B_UID(662) };
+static const struct builtin B23_vec_vor = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vor:23", "*vor", CODE_FOR_xfxx_simple, B_UID(663) };
+static const struct builtin B24_vec_vor = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vor:24", "*vor", CODE_FOR_xfxx_simple, B_UID(664) };
+static const struct builtin B1_vec_vpkuhum = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vpkuhum:1", "*vpkuhum", CODE_FOR_xfxx_perm, B_UID(665) };
+static const struct builtin B1_vec_vpkuwum = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vpkuwum:1", "*vpkuwum", CODE_FOR_xfxx_perm, B_UID(666) };
+static const struct builtin B2_vec_vpkuhum = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vpkuhum:2", "*vpkuhum", CODE_FOR_xfxx_perm, B_UID(667) };
+static const struct builtin B2_vec_vpkuwum = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vpkuwum:2", "*vpkuwum", CODE_FOR_xfxx_perm, B_UID(668) };
+static const struct builtin B3_vec_vpkuhum = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vpkuhum:3", "*vpkuhum", CODE_FOR_xfxx_perm, B_UID(669) };
+static const struct builtin B3_vec_vpkuwum = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vpkuwum:3", "*vpkuwum", CODE_FOR_xfxx_perm, B_UID(670) };
+static const struct builtin B_vec_vpkpx = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vpkpx", "*vpkpx", CODE_FOR_xfxx_perm, B_UID(671) };
+static const struct builtin B_vec_vpkshss = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vpkshss", "*vpkshss", CODE_FOR_xfxx_perm, B_UID(672) };
+static const struct builtin B_vec_vpkswss = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vpkswss", "*vpkswss", CODE_FOR_xfxx_perm, B_UID(673) };
+static const struct builtin B_vec_vpkuhus = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vpkuhus", "*vpkuhus", CODE_FOR_xfxx_perm, B_UID(674) };
+static const struct builtin B_vec_vpkuwus = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vpkuwus", "*vpkuwus", CODE_FOR_xfxx_perm, B_UID(675) };
+static const struct builtin B_vec_vpkshus = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vpkshus", "*vpkshus", CODE_FOR_xfxx_perm, B_UID(676) };
+static const struct builtin B_vec_vpkswus = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vpkswus", "*vpkswus", CODE_FOR_xfxx_perm, B_UID(677) };
+static const struct builtin B1_vec_vperm = { { &T_vec_b16, &T_vec_b16, &T_vec_u8, }, "xxx", &T_vec_b16, 3, FALSE, FALSE, 0, "vec_vperm:1", "*vperm", CODE_FOR_xfxxx_perm, B_UID(678) };
+static const struct builtin B2_vec_vperm = { { &T_vec_b32, &T_vec_b32, &T_vec_u8, }, "xxx", &T_vec_b32, 3, FALSE, FALSE, 0, "vec_vperm:2", "*vperm", CODE_FOR_xfxxx_perm, B_UID(679) };
+static const struct builtin B3_vec_vperm = { { &T_vec_b8, &T_vec_b8, &T_vec_u8, }, "xxx", &T_vec_b8, 3, FALSE, FALSE, 0, "vec_vperm:3", "*vperm", CODE_FOR_xfxxx_perm, B_UID(680) };
+static const struct builtin B4_vec_vperm = { { &T_vec_f32, &T_vec_f32, &T_vec_u8, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vperm:4", "*vperm", CODE_FOR_xfxxx_perm, B_UID(681) };
+static const struct builtin B5_vec_vperm = { { &T_vec_p16, &T_vec_p16, &T_vec_u8, }, "xxx", &T_vec_p16, 3, FALSE, FALSE, 0, "vec_vperm:5", "*vperm", CODE_FOR_xfxxx_perm, B_UID(682) };
+static const struct builtin B6_vec_vperm = { { &T_vec_s16, &T_vec_s16, &T_vec_u8, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vperm:6", "*vperm", CODE_FOR_xfxxx_perm, B_UID(683) };
+static const struct builtin B7_vec_vperm = { { &T_vec_s32, &T_vec_s32, &T_vec_u8, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vperm:7", "*vperm", CODE_FOR_xfxxx_perm, B_UID(684) };
+static const struct builtin B8_vec_vperm = { { &T_vec_s8, &T_vec_s8, &T_vec_u8, }, "xxx", &T_vec_s8, 3, FALSE, FALSE, 0, "vec_vperm:8", "*vperm", CODE_FOR_xfxxx_perm, B_UID(685) };
+static const struct builtin B9_vec_vperm = { { &T_vec_u16, &T_vec_u16, &T_vec_u8, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vperm:9", "*vperm", CODE_FOR_xfxxx_perm, B_UID(686) };
+static const struct builtin B10_vec_vperm = { { &T_vec_u32, &T_vec_u32, &T_vec_u8, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vperm:10", "*vperm", CODE_FOR_xfxxx_perm, B_UID(687) };
+static const struct builtin B11_vec_vperm = { { &T_vec_u8, &T_vec_u8, &T_vec_u8, }, "xxx", &T_vec_u8, 3, FALSE, FALSE, 0, "vec_vperm:11", "*vperm", CODE_FOR_xfxxx_perm, B_UID(688) };
+static const struct builtin B_vec_vrefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrefp", "*vrefp", CODE_FOR_xfx_fp, B_UID(689) };
+static const struct builtin B1_vec_vrlh = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vrlh:1", "*vrlh", CODE_FOR_xfxx_simple, B_UID(690) };
+static const struct builtin B1_vec_vrlw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vrlw:1", "*vrlw", CODE_FOR_xfxx_simple, B_UID(691) };
+static const struct builtin B1_vec_vrlb = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vrlb:1", "*vrlb", CODE_FOR_xfxx_simple, B_UID(692) };
+static const struct builtin B2_vec_vrlh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vrlh:2", "*vrlh", CODE_FOR_xfxx_simple, B_UID(693) };
+static const struct builtin B2_vec_vrlw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vrlw:2", "*vrlw", CODE_FOR_xfxx_simple, B_UID(694) };
+static const struct builtin B2_vec_vrlb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vrlb:2", "*vrlb", CODE_FOR_xfxx_simple, B_UID(695) };
+static const struct builtin B_vec_vrfin = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfin", "*vrfin", CODE_FOR_xfx_fp, B_UID(696) };
+static const struct builtin B_vec_vrsqrtefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrsqrtefp", "*vrsqrtefp", CODE_FOR_xfx_fp, B_UID(697) };
+static const struct builtin B1_vec_vsel = { { &T_vec_b16, &T_vec_b16, &T_vec_b16, }, "xxx", &T_vec_b16, 3, FALSE, FALSE, 0, "vec_vsel:1", "*vsel", CODE_FOR_xfxxx_simple, B_UID(698) };
+static const struct builtin B2_vec_vsel = { { &T_vec_b16, &T_vec_b16, &T_vec_u16, }, "xxx", &T_vec_b16, 3, FALSE, FALSE, 0, "vec_vsel:2", "*vsel", CODE_FOR_xfxxx_simple, B_UID(699) };
+static const struct builtin B3_vec_vsel = { { &T_vec_b32, &T_vec_b32, &T_vec_b32, }, "xxx", &T_vec_b32, 3, FALSE, FALSE, 0, "vec_vsel:3", "*vsel", CODE_FOR_xfxxx_simple, B_UID(700) };
+static const struct builtin B4_vec_vsel = { { &T_vec_b32, &T_vec_b32, &T_vec_u32, }, "xxx", &T_vec_b32, 3, FALSE, FALSE, 0, "vec_vsel:4", "*vsel", CODE_FOR_xfxxx_simple, B_UID(701) };
+static const struct builtin B5_vec_vsel = { { &T_vec_b8, &T_vec_b8, &T_vec_b8, }, "xxx", &T_vec_b8, 3, FALSE, FALSE, 0, "vec_vsel:5", "*vsel", CODE_FOR_xfxxx_simple, B_UID(702) };
+static const struct builtin B6_vec_vsel = { { &T_vec_b8, &T_vec_b8, &T_vec_u8, }, "xxx", &T_vec_b8, 3, FALSE, FALSE, 0, "vec_vsel:6", "*vsel", CODE_FOR_xfxxx_simple, B_UID(703) };
+static const struct builtin B7_vec_vsel = { { &T_vec_f32, &T_vec_f32, &T_vec_b32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vsel:7", "*vsel", CODE_FOR_xfxxx_simple, B_UID(704) };
+static const struct builtin B8_vec_vsel = { { &T_vec_f32, &T_vec_f32, &T_vec_u32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vsel:8", "*vsel", CODE_FOR_xfxxx_simple, B_UID(705) };
+static const struct builtin B9_vec_vsel = { { &T_vec_s16, &T_vec_s16, &T_vec_b16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vsel:9", "*vsel", CODE_FOR_xfxxx_simple, B_UID(706) };
+static const struct builtin B10_vec_vsel = { { &T_vec_s16, &T_vec_s16, &T_vec_u16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vsel:10", "*vsel", CODE_FOR_xfxxx_simple, B_UID(707) };
+static const struct builtin B11_vec_vsel = { { &T_vec_s32, &T_vec_s32, &T_vec_b32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vsel:11", "*vsel", CODE_FOR_xfxxx_simple, B_UID(708) };
+static const struct builtin B12_vec_vsel = { { &T_vec_s32, &T_vec_s32, &T_vec_u32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vsel:12", "*vsel", CODE_FOR_xfxxx_simple, B_UID(709) };
+static const struct builtin B13_vec_vsel = { { &T_vec_s8, &T_vec_s8, &T_vec_b8, }, "xxx", &T_vec_s8, 3, FALSE, FALSE, 0, "vec_vsel:13", "*vsel", CODE_FOR_xfxxx_simple, B_UID(710) };
+static const struct builtin B14_vec_vsel = { { &T_vec_s8, &T_vec_s8, &T_vec_u8, }, "xxx", &T_vec_s8, 3, FALSE, FALSE, 0, "vec_vsel:14", "*vsel", CODE_FOR_xfxxx_simple, B_UID(711) };
+static const struct builtin B15_vec_vsel = { { &T_vec_u16, &T_vec_u16, &T_vec_b16, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vsel:15", "*vsel", CODE_FOR_xfxxx_simple, B_UID(712) };
+static const struct builtin B16_vec_vsel = { { &T_vec_u16, &T_vec_u16, &T_vec_u16, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vsel:16", "*vsel", CODE_FOR_xfxxx_simple, B_UID(713) };
+static const struct builtin B17_vec_vsel = { { &T_vec_u32, &T_vec_u32, &T_vec_b32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vsel:17", "*vsel", CODE_FOR_xfxxx_simple, B_UID(714) };
+static const struct builtin B18_vec_vsel = { { &T_vec_u32, &T_vec_u32, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vsel:18", "*vsel", CODE_FOR_xfxxx_simple, B_UID(715) };
+static const struct builtin B19_vec_vsel = { { &T_vec_u8, &T_vec_u8, &T_vec_b8, }, "xxx", &T_vec_u8, 3, FALSE, FALSE, 0, "vec_vsel:19", "*vsel", CODE_FOR_xfxxx_simple, B_UID(716) };
+static const struct builtin B20_vec_vsel = { { &T_vec_u8, &T_vec_u8, &T_vec_u8, }, "xxx", &T_vec_u8, 3, FALSE, FALSE, 0, "vec_vsel:20", "*vsel", CODE_FOR_xfxxx_simple, B_UID(717) };
+static const struct builtin B1_vec_vslh = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vslh:1", "*vslh", CODE_FOR_xfxx_simple, B_UID(718) };
+static const struct builtin B1_vec_vslw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vslw:1", "*vslw", CODE_FOR_xfxx_simple, B_UID(719) };
+static const struct builtin B1_vec_vslb = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vslb:1", "*vslb", CODE_FOR_xfxx_simple, B_UID(720) };
+static const struct builtin B2_vec_vslh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vslh:2", "*vslh", CODE_FOR_xfxx_simple, B_UID(721) };
+static const struct builtin B2_vec_vslw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vslw:2", "*vslw", CODE_FOR_xfxx_simple, B_UID(722) };
+static const struct builtin B2_vec_vslb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vslb:2", "*vslb", CODE_FOR_xfxx_simple, B_UID(723) };
+static const struct builtin B1_vec_vsldoi = { { &T_vec_b16, &T_vec_b16, &T_immed_u4, }, "xxC", &T_vec_b16, 3, FALSE, FALSE, 3, "vec_vsldoi:1", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(724) };
+static const struct builtin B2_vec_vsldoi = { { &T_vec_b32, &T_vec_b32, &T_immed_u4, }, "xxC", &T_vec_b32, 3, FALSE, FALSE, 3, "vec_vsldoi:2", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(725) };
+static const struct builtin B3_vec_vsldoi = { { &T_vec_b8, &T_vec_b8, &T_immed_u4, }, "xxC", &T_vec_b8, 3, FALSE, FALSE, 3, "vec_vsldoi:3", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(726) };
+static const struct builtin B4_vec_vsldoi = { { &T_vec_f32, &T_vec_f32, &T_immed_u4, }, "xxC", &T_vec_f32, 3, FALSE, FALSE, 3, "vec_vsldoi:4", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(727) };
+static const struct builtin B5_vec_vsldoi = { { &T_vec_p16, &T_vec_p16, &T_immed_u4, }, "xxC", &T_vec_p16, 3, FALSE, FALSE, 3, "vec_vsldoi:5", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(728) };
+static const struct builtin B6_vec_vsldoi = { { &T_vec_s16, &T_vec_s16, &T_immed_u4, }, "xxC", &T_vec_s16, 3, FALSE, FALSE, 3, "vec_vsldoi:6", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(729) };
+static const struct builtin B7_vec_vsldoi = { { &T_vec_s32, &T_vec_s32, &T_immed_u4, }, "xxC", &T_vec_s32, 3, FALSE, FALSE, 3, "vec_vsldoi:7", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(730) };
+static const struct builtin B8_vec_vsldoi = { { &T_vec_s8, &T_vec_s8, &T_immed_u4, }, "xxC", &T_vec_s8, 3, FALSE, FALSE, 3, "vec_vsldoi:8", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(731) };
+static const struct builtin B9_vec_vsldoi = { { &T_vec_u16, &T_vec_u16, &T_immed_u4, }, "xxC", &T_vec_u16, 3, FALSE, FALSE, 3, "vec_vsldoi:9", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(732) };
+static const struct builtin B10_vec_vsldoi = { { &T_vec_u32, &T_vec_u32, &T_immed_u4, }, "xxC", &T_vec_u32, 3, FALSE, FALSE, 3, "vec_vsldoi:10", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(733) };
+static const struct builtin B11_vec_vsldoi = { { &T_vec_u8, &T_vec_u8, &T_immed_u4, }, "xxC", &T_vec_u8, 3, FALSE, FALSE, 3, "vec_vsldoi:11", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(734) };
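+/* For the vec_vsldoi rows the trailing 'C' operand is the 4-bit
+   literal byte count (T_immed_u4) selecting how far the concatenation
+   of the two inputs is shifted left, e.g. vec_sld(a, b, 4) in source
+   form.  */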
+static const struct builtin B1_vec_vsl = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsl:1", "*vsl", CODE_FOR_xfxx_simple, B_UID(735) };
+static const struct builtin B2_vec_vsl = { { &T_vec_b16, &T_vec_u32, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsl:2", "*vsl", CODE_FOR_xfxx_simple, B_UID(736) };
+static const struct builtin B3_vec_vsl = { { &T_vec_b16, &T_vec_u8, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsl:3", "*vsl", CODE_FOR_xfxx_simple, B_UID(737) };
+static const struct builtin B4_vec_vsl = { { &T_vec_b32, &T_vec_u16, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsl:4", "*vsl", CODE_FOR_xfxx_simple, B_UID(738) };
+static const struct builtin B5_vec_vsl = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsl:5", "*vsl", CODE_FOR_xfxx_simple, B_UID(739) };
+static const struct builtin B6_vec_vsl = { { &T_vec_b32, &T_vec_u8, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsl:6", "*vsl", CODE_FOR_xfxx_simple, B_UID(740) };
+static const struct builtin B7_vec_vsl = { { &T_vec_b8, &T_vec_u16, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsl:7", "*vsl", CODE_FOR_xfxx_simple, B_UID(741) };
+static const struct builtin B8_vec_vsl = { { &T_vec_b8, &T_vec_u32, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsl:8", "*vsl", CODE_FOR_xfxx_simple, B_UID(742) };
+static const struct builtin B9_vec_vsl = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsl:9", "*vsl", CODE_FOR_xfxx_simple, B_UID(743) };
+static const struct builtin B10_vec_vsl = { { &T_vec_p16, &T_vec_u16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsl:10", "*vsl", CODE_FOR_xfxx_simple, B_UID(744) };
+static const struct builtin B11_vec_vsl = { { &T_vec_p16, &T_vec_u32, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsl:11", "*vsl", CODE_FOR_xfxx_simple, B_UID(745) };
+static const struct builtin B12_vec_vsl = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsl:12", "*vsl", CODE_FOR_xfxx_simple, B_UID(746) };
+static const struct builtin B13_vec_vsl = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsl:13", "*vsl", CODE_FOR_xfxx_simple, B_UID(747) };
+static const struct builtin B14_vec_vsl = { { &T_vec_s16, &T_vec_u32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsl:14", "*vsl", CODE_FOR_xfxx_simple, B_UID(748) };
+static const struct builtin B15_vec_vsl = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsl:15", "*vsl", CODE_FOR_xfxx_simple, B_UID(749) };
+static const struct builtin B16_vec_vsl = { { &T_vec_s32, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsl:16", "*vsl", CODE_FOR_xfxx_simple, B_UID(750) };
+static const struct builtin B17_vec_vsl = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsl:17", "*vsl", CODE_FOR_xfxx_simple, B_UID(751) };
+static const struct builtin B18_vec_vsl = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsl:18", "*vsl", CODE_FOR_xfxx_simple, B_UID(752) };
+static const struct builtin B19_vec_vsl = { { &T_vec_s8, &T_vec_u16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsl:19", "*vsl", CODE_FOR_xfxx_simple, B_UID(753) };
+static const struct builtin B20_vec_vsl = { { &T_vec_s8, &T_vec_u32, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsl:20", "*vsl", CODE_FOR_xfxx_simple, B_UID(754) };
+static const struct builtin B21_vec_vsl = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsl:21", "*vsl", CODE_FOR_xfxx_simple, B_UID(755) };
+static const struct builtin B22_vec_vsl = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsl:22", "*vsl", CODE_FOR_xfxx_simple, B_UID(756) };
+static const struct builtin B23_vec_vsl = { { &T_vec_u16, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsl:23", "*vsl", CODE_FOR_xfxx_simple, B_UID(757) };
+static const struct builtin B24_vec_vsl = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsl:24", "*vsl", CODE_FOR_xfxx_simple, B_UID(758) };
+static const struct builtin B25_vec_vsl = { { &T_vec_u32, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsl:25", "*vsl", CODE_FOR_xfxx_simple, B_UID(759) };
+static const struct builtin B26_vec_vsl = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsl:26", "*vsl", CODE_FOR_xfxx_simple, B_UID(760) };
+static const struct builtin B27_vec_vsl = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsl:27", "*vsl", CODE_FOR_xfxx_simple, B_UID(761) };
+static const struct builtin B28_vec_vsl = { { &T_vec_u8, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsl:28", "*vsl", CODE_FOR_xfxx_simple, B_UID(762) };
+static const struct builtin B29_vec_vsl = { { &T_vec_u8, &T_vec_u32, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsl:29", "*vsl", CODE_FOR_xfxx_simple, B_UID(763) };
+static const struct builtin B30_vec_vsl = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsl:30", "*vsl", CODE_FOR_xfxx_simple, B_UID(764) };
+static const struct builtin B1_vec_vslo = { { &T_vec_f32, &T_vec_s8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vslo:1", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(765) };
+static const struct builtin B2_vec_vslo = { { &T_vec_f32, &T_vec_u8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vslo:2", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(766) };
+static const struct builtin B3_vec_vslo = { { &T_vec_p16, &T_vec_s8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vslo:3", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(767) };
+static const struct builtin B4_vec_vslo = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vslo:4", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(768) };
+static const struct builtin B5_vec_vslo = { { &T_vec_s16, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vslo:5", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(769) };
+static const struct builtin B6_vec_vslo = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vslo:6", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(770) };
+static const struct builtin B7_vec_vslo = { { &T_vec_s32, &T_vec_s8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vslo:7", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(771) };
+static const struct builtin B8_vec_vslo = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vslo:8", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(772) };
+static const struct builtin B9_vec_vslo = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vslo:9", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(773) };
+static const struct builtin B10_vec_vslo = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vslo:10", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(774) };
+static const struct builtin B11_vec_vslo = { { &T_vec_u16, &T_vec_s8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vslo:11", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(775) };
+static const struct builtin B12_vec_vslo = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vslo:12", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(776) };
+static const struct builtin B13_vec_vslo = { { &T_vec_u32, &T_vec_s8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vslo:13", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(777) };
+static const struct builtin B14_vec_vslo = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vslo:14", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(778) };
+static const struct builtin B15_vec_vslo = { { &T_vec_u8, &T_vec_s8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vslo:15", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(779) };
+static const struct builtin B16_vec_vslo = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vslo:16", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(780) };
+static const struct builtin B1_vec_vsplth = { { &T_vec_b16, &T_immed_u5, NULL, }, "xB", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsplth:1", "*vsplth", CODE_FOR_xfxB_perm, B_UID(781) };
+static const struct builtin B1_vec_vspltw = { { &T_vec_b32, &T_immed_u5, NULL, }, "xB", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vspltw:1", "*vspltw", CODE_FOR_xfxB_perm, B_UID(782) };
+static const struct builtin B1_vec_vspltb = { { &T_vec_b8, &T_immed_u5, NULL, }, "xB", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vspltb:1", "*vspltb", CODE_FOR_xfxB_perm, B_UID(783) };
+static const struct builtin B2_vec_vspltw = { { &T_vec_f32, &T_immed_u5, NULL, }, "xB", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vspltw:2", "*vspltw", CODE_FOR_xfxB_perm, B_UID(784) };
+static const struct builtin B2_vec_vsplth = { { &T_vec_p16, &T_immed_u5, NULL, }, "xB", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsplth:2", "*vsplth", CODE_FOR_xfxB_perm, B_UID(785) };
+static const struct builtin B3_vec_vsplth = { { &T_vec_s16, &T_immed_u5, NULL, }, "xB", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsplth:3", "*vsplth", CODE_FOR_xfxB_perm, B_UID(786) };
+static const struct builtin B3_vec_vspltw = { { &T_vec_s32, &T_immed_u5, NULL, }, "xB", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vspltw:3", "*vspltw", CODE_FOR_xfxB_perm, B_UID(787) };
+static const struct builtin B2_vec_vspltb = { { &T_vec_s8, &T_immed_u5, NULL, }, "xB", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vspltb:2", "*vspltb", CODE_FOR_xfxB_perm, B_UID(788) };
+static const struct builtin B4_vec_vsplth = { { &T_vec_u16, &T_immed_u5, NULL, }, "xB", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsplth:4", "*vsplth", CODE_FOR_xfxB_perm, B_UID(789) };
+static const struct builtin B4_vec_vspltw = { { &T_vec_u32, &T_immed_u5, NULL, }, "xB", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vspltw:4", "*vspltw", CODE_FOR_xfxB_perm, B_UID(790) };
+static const struct builtin B3_vec_vspltb = { { &T_vec_u8, &T_immed_u5, NULL, }, "xB", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vspltb:3", "*vspltb", CODE_FOR_xfxB_perm, B_UID(791) };
+static const struct builtin B_vec_vspltish = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_s16, 1, FALSE, FALSE, 5, "vec_vspltish", "*vspltish", CODE_FOR_xfA_perm, B_UID(792) };
+static const struct builtin B_vec_vspltisw = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_s32, 1, FALSE, FALSE, 6, "vec_vspltisw", "*vspltisw", CODE_FOR_xfA_perm, B_UID(793) };
+static const struct builtin B_vec_vspltisb = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_s8, 1, FALSE, FALSE, 4, "vec_vspltisb", "*vspltisb", CODE_FOR_xfA_perm, B_UID(794) };
+static const struct builtin B_vec_splat_u16 = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_u16, 1, FALSE, FALSE, 5, "vec_splat_u16", "*vspltish", CODE_FOR_xfA_perm, B_UID(795) };
+static const struct builtin B_vec_splat_u32 = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_u32, 1, FALSE, FALSE, 6, "vec_splat_u32", "*vspltisw", CODE_FOR_xfA_perm, B_UID(796) };
+static const struct builtin B_vec_splat_u8 = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_u8, 1, FALSE, FALSE, 4, "vec_splat_u8", "*vspltisb", CODE_FOR_xfA_perm, B_UID(797) };
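+/* The splat-immediate rows take a single 5-bit signed literal
+   (T_immed_s5, range -16..15).  Note that vec_splat_u8/u16/u32 reuse
+   the signed vspltisb/vspltish/vspltisw instructions and merely retype
+   the result, so e.g. vec_splat_u8(-1) yields a vector of 0xFF bytes.  */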
+static const struct builtin B1_vec_vsrh = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsrh:1", "*vsrh", CODE_FOR_xfxx_simple, B_UID(798) };
+static const struct builtin B1_vec_vsrw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsrw:1", "*vsrw", CODE_FOR_xfxx_simple, B_UID(799) };
+static const struct builtin B1_vec_vsrb = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsrb:1", "*vsrb", CODE_FOR_xfxx_simple, B_UID(800) };
+static const struct builtin B2_vec_vsrh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsrh:2", "*vsrh", CODE_FOR_xfxx_simple, B_UID(801) };
+static const struct builtin B2_vec_vsrw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsrw:2", "*vsrw", CODE_FOR_xfxx_simple, B_UID(802) };
+static const struct builtin B2_vec_vsrb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsrb:2", "*vsrb", CODE_FOR_xfxx_simple, B_UID(803) };
+static const struct builtin B1_vec_vsrah = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsrah:1", "*vsrah", CODE_FOR_xfxx_simple, B_UID(804) };
+static const struct builtin B1_vec_vsraw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsraw:1", "*vsraw", CODE_FOR_xfxx_simple, B_UID(805) };
+static const struct builtin B1_vec_vsrab = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsrab:1", "*vsrab", CODE_FOR_xfxx_simple, B_UID(806) };
+static const struct builtin B2_vec_vsrah = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsrah:2", "*vsrah", CODE_FOR_xfxx_simple, B_UID(807) };
+static const struct builtin B2_vec_vsraw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsraw:2", "*vsraw", CODE_FOR_xfxx_simple, B_UID(808) };
+static const struct builtin B2_vec_vsrab = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsrab:2", "*vsrab", CODE_FOR_xfxx_simple, B_UID(809) };
+static const struct builtin B1_vec_vsr = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsr:1", "*vsr", CODE_FOR_xfxx_simple, B_UID(810) };
+static const struct builtin B2_vec_vsr = { { &T_vec_b16, &T_vec_u32, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsr:2", "*vsr", CODE_FOR_xfxx_simple, B_UID(811) };
+static const struct builtin B3_vec_vsr = { { &T_vec_b16, &T_vec_u8, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsr:3", "*vsr", CODE_FOR_xfxx_simple, B_UID(812) };
+static const struct builtin B4_vec_vsr = { { &T_vec_b32, &T_vec_u16, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsr:4", "*vsr", CODE_FOR_xfxx_simple, B_UID(813) };
+static const struct builtin B5_vec_vsr = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsr:5", "*vsr", CODE_FOR_xfxx_simple, B_UID(814) };
+static const struct builtin B6_vec_vsr = { { &T_vec_b32, &T_vec_u8, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsr:6", "*vsr", CODE_FOR_xfxx_simple, B_UID(815) };
+static const struct builtin B7_vec_vsr = { { &T_vec_b8, &T_vec_u16, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsr:7", "*vsr", CODE_FOR_xfxx_simple, B_UID(816) };
+static const struct builtin B8_vec_vsr = { { &T_vec_b8, &T_vec_u32, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsr:8", "*vsr", CODE_FOR_xfxx_simple, B_UID(817) };
+static const struct builtin B9_vec_vsr = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsr:9", "*vsr", CODE_FOR_xfxx_simple, B_UID(818) };
+static const struct builtin B10_vec_vsr = { { &T_vec_p16, &T_vec_u16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsr:10", "*vsr", CODE_FOR_xfxx_simple, B_UID(819) };
+static const struct builtin B11_vec_vsr = { { &T_vec_p16, &T_vec_u32, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsr:11", "*vsr", CODE_FOR_xfxx_simple, B_UID(820) };
+static const struct builtin B12_vec_vsr = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsr:12", "*vsr", CODE_FOR_xfxx_simple, B_UID(821) };
+static const struct builtin B13_vec_vsr = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsr:13", "*vsr", CODE_FOR_xfxx_simple, B_UID(822) };
+static const struct builtin B14_vec_vsr = { { &T_vec_s16, &T_vec_u32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsr:14", "*vsr", CODE_FOR_xfxx_simple, B_UID(823) };
+static const struct builtin B15_vec_vsr = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsr:15", "*vsr", CODE_FOR_xfxx_simple, B_UID(824) };
+static const struct builtin B16_vec_vsr = { { &T_vec_s32, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsr:16", "*vsr", CODE_FOR_xfxx_simple, B_UID(825) };
+static const struct builtin B17_vec_vsr = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsr:17", "*vsr", CODE_FOR_xfxx_simple, B_UID(826) };
+static const struct builtin B18_vec_vsr = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsr:18", "*vsr", CODE_FOR_xfxx_simple, B_UID(827) };
+static const struct builtin B19_vec_vsr = { { &T_vec_s8, &T_vec_u16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsr:19", "*vsr", CODE_FOR_xfxx_simple, B_UID(828) };
+static const struct builtin B20_vec_vsr = { { &T_vec_s8, &T_vec_u32, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsr:20", "*vsr", CODE_FOR_xfxx_simple, B_UID(829) };
+static const struct builtin B21_vec_vsr = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsr:21", "*vsr", CODE_FOR_xfxx_simple, B_UID(830) };
+static const struct builtin B22_vec_vsr = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsr:22", "*vsr", CODE_FOR_xfxx_simple, B_UID(831) };
+static const struct builtin B23_vec_vsr = { { &T_vec_u16, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsr:23", "*vsr", CODE_FOR_xfxx_simple, B_UID(832) };
+static const struct builtin B24_vec_vsr = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsr:24", "*vsr", CODE_FOR_xfxx_simple, B_UID(833) };
+static const struct builtin B25_vec_vsr = { { &T_vec_u32, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsr:25", "*vsr", CODE_FOR_xfxx_simple, B_UID(834) };
+static const struct builtin B26_vec_vsr = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsr:26", "*vsr", CODE_FOR_xfxx_simple, B_UID(835) };
+static const struct builtin B27_vec_vsr = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsr:27", "*vsr", CODE_FOR_xfxx_simple, B_UID(836) };
+static const struct builtin B28_vec_vsr = { { &T_vec_u8, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsr:28", "*vsr", CODE_FOR_xfxx_simple, B_UID(837) };
+static const struct builtin B29_vec_vsr = { { &T_vec_u8, &T_vec_u32, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsr:29", "*vsr", CODE_FOR_xfxx_simple, B_UID(838) };
+static const struct builtin B30_vec_vsr = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsr:30", "*vsr", CODE_FOR_xfxx_simple, B_UID(839) };
+static const struct builtin B1_vec_vsro = { { &T_vec_f32, &T_vec_s8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vsro:1", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(840) };
+static const struct builtin B2_vec_vsro = { { &T_vec_f32, &T_vec_u8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vsro:2", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(841) };
+static const struct builtin B3_vec_vsro = { { &T_vec_p16, &T_vec_s8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsro:3", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(842) };
+static const struct builtin B4_vec_vsro = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsro:4", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(843) };
+static const struct builtin B5_vec_vsro = { { &T_vec_s16, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsro:5", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(844) };
+static const struct builtin B6_vec_vsro = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsro:6", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(845) };
+static const struct builtin B7_vec_vsro = { { &T_vec_s32, &T_vec_s8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsro:7", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(846) };
+static const struct builtin B8_vec_vsro = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsro:8", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(847) };
+static const struct builtin B9_vec_vsro = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsro:9", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(848) };
+static const struct builtin B10_vec_vsro = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsro:10", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(849) };
+static const struct builtin B11_vec_vsro = { { &T_vec_u16, &T_vec_s8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsro:11", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(850) };
+static const struct builtin B12_vec_vsro = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsro:12", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(851) };
+static const struct builtin B13_vec_vsro = { { &T_vec_u32, &T_vec_s8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsro:13", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(852) };
+static const struct builtin B14_vec_vsro = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsro:14", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(853) };
+static const struct builtin B15_vec_vsro = { { &T_vec_u8, &T_vec_s8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsro:15", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(854) };
+static const struct builtin B16_vec_vsro = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsro:16", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(855) };
+static const struct builtin B1_vec_stvx = { { &T_vec_b16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:1", "*stvx", CODE_FOR_sfxii_store, B_UID(856) };
+static const struct builtin B2_vec_stvx = { { &T_vec_b16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:2", "*stvx", CODE_FOR_sfxii_store, B_UID(857) };
+static const struct builtin B3_vec_stvx = { { &T_vec_b16, &T_int, &T_vec_b16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:3", "*stvx", CODE_FOR_sfxii_store, B_UID(858) };
+static const struct builtin B4_vec_stvx = { { &T_vec_b32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:4", "*stvx", CODE_FOR_sfxii_store, B_UID(859) };
+static const struct builtin B5_vec_stvx = { { &T_vec_b32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:5", "*stvx", CODE_FOR_sfxii_store, B_UID(860) };
+static const struct builtin B6_vec_stvx = { { &T_vec_b32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:6", "*stvx", CODE_FOR_sfxii_store, B_UID(861) };
+static const struct builtin B7_vec_stvx = { { &T_vec_b32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:7", "*stvx", CODE_FOR_sfxii_store, B_UID(862) };
+static const struct builtin B8_vec_stvx = { { &T_vec_b32, &T_int, &T_vec_b32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:8", "*stvx", CODE_FOR_sfxii_store, B_UID(863) };
+static const struct builtin B9_vec_stvx = { { &T_vec_b8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:9", "*stvx", CODE_FOR_sfxii_store, B_UID(864) };
+static const struct builtin B10_vec_stvx = { { &T_vec_b8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:10", "*stvx", CODE_FOR_sfxii_store, B_UID(865) };
+static const struct builtin B11_vec_stvx = { { &T_vec_b8, &T_int, &T_vec_b8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:11", "*stvx", CODE_FOR_sfxii_store, B_UID(866) };
+static const struct builtin B12_vec_stvx = { { &T_vec_f32, &T_int, &T_float_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:12", "*stvx", CODE_FOR_sfxii_store, B_UID(867) };
+static const struct builtin B13_vec_stvx = { { &T_vec_f32, &T_int, &T_vec_f32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:13", "*stvx", CODE_FOR_sfxii_store, B_UID(868) };
+static const struct builtin B14_vec_stvx = { { &T_vec_p16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:14", "*stvx", CODE_FOR_sfxii_store, B_UID(869) };
+static const struct builtin B15_vec_stvx = { { &T_vec_p16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:15", "*stvx", CODE_FOR_sfxii_store, B_UID(870) };
+static const struct builtin B16_vec_stvx = { { &T_vec_p16, &T_int, &T_vec_p16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:16", "*stvx", CODE_FOR_sfxii_store, B_UID(871) };
+static const struct builtin B17_vec_stvx = { { &T_vec_s16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:17", "*stvx", CODE_FOR_sfxii_store, B_UID(872) };
+static const struct builtin B18_vec_stvx = { { &T_vec_s16, &T_int, &T_vec_s16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:18", "*stvx", CODE_FOR_sfxii_store, B_UID(873) };
+static const struct builtin B19_vec_stvx = { { &T_vec_s32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:19", "*stvx", CODE_FOR_sfxii_store, B_UID(874) };
+static const struct builtin B20_vec_stvx = { { &T_vec_s32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:20", "*stvx", CODE_FOR_sfxii_store, B_UID(875) };
+static const struct builtin B21_vec_stvx = { { &T_vec_s32, &T_int, &T_vec_s32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:21", "*stvx", CODE_FOR_sfxii_store, B_UID(876) };
+static const struct builtin B22_vec_stvx = { { &T_vec_s8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:22", "*stvx", CODE_FOR_sfxii_store, B_UID(877) };
+static const struct builtin B23_vec_stvx = { { &T_vec_s8, &T_int, &T_vec_s8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:23", "*stvx", CODE_FOR_sfxii_store, B_UID(878) };
+static const struct builtin B24_vec_stvx = { { &T_vec_u16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:24", "*stvx", CODE_FOR_sfxii_store, B_UID(879) };
+static const struct builtin B25_vec_stvx = { { &T_vec_u16, &T_int, &T_vec_u16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:25", "*stvx", CODE_FOR_sfxii_store, B_UID(880) };
+static const struct builtin B26_vec_stvx = { { &T_vec_u32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:26", "*stvx", CODE_FOR_sfxii_store, B_UID(881) };
+static const struct builtin B27_vec_stvx = { { &T_vec_u32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:27", "*stvx", CODE_FOR_sfxii_store, B_UID(882) };
+static const struct builtin B28_vec_stvx = { { &T_vec_u32, &T_int, &T_vec_u32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:28", "*stvx", CODE_FOR_sfxii_store, B_UID(883) };
+static const struct builtin B29_vec_stvx = { { &T_vec_u8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:29", "*stvx", CODE_FOR_sfxii_store, B_UID(884) };
+static const struct builtin B30_vec_stvx = { { &T_vec_u8, &T_int, &T_vec_u8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:30", "*stvx", CODE_FOR_sfxii_store, B_UID(885) };
+static const struct builtin B1_vec_stvebx = { { &T_vec_b16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:1", "*stvebx", CODE_FOR_sfxii_store, B_UID(886) };
+static const struct builtin B2_vec_stvebx = { { &T_vec_b16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:2", "*stvebx", CODE_FOR_sfxii_store, B_UID(887) };
+static const struct builtin B1_vec_stvewx = { { &T_vec_b32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:1", "*stvewx", CODE_FOR_sfxii_store, B_UID(888) };
+static const struct builtin B2_vec_stvewx = { { &T_vec_b32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:2", "*stvewx", CODE_FOR_sfxii_store, B_UID(889) };
+static const struct builtin B3_vec_stvewx = { { &T_vec_b32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:3", "*stvewx", CODE_FOR_sfxii_store, B_UID(890) };
+static const struct builtin B4_vec_stvewx = { { &T_vec_b32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:4", "*stvewx", CODE_FOR_sfxii_store, B_UID(891) };
+static const struct builtin B3_vec_stvebx = { { &T_vec_b8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:3", "*stvebx", CODE_FOR_sfxii_store, B_UID(892) };
+static const struct builtin B4_vec_stvebx = { { &T_vec_b8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:4", "*stvebx", CODE_FOR_sfxii_store, B_UID(893) };
+static const struct builtin B5_vec_stvewx = { { &T_vec_f32, &T_int, &T_float_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:5", "*stvewx", CODE_FOR_sfxii_store, B_UID(894) };
+static const struct builtin B1_vec_stvehx = { { &T_vec_p16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:1", "*stvehx", CODE_FOR_sfxii_store, B_UID(895) };
+static const struct builtin B2_vec_stvehx = { { &T_vec_p16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:2", "*stvehx", CODE_FOR_sfxii_store, B_UID(896) };
+static const struct builtin B3_vec_stvehx = { { &T_vec_s16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:3", "*stvehx", CODE_FOR_sfxii_store, B_UID(897) };
+static const struct builtin B6_vec_stvewx = { { &T_vec_s32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:6", "*stvewx", CODE_FOR_sfxii_store, B_UID(898) };
+static const struct builtin B7_vec_stvewx = { { &T_vec_s32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:7", "*stvewx", CODE_FOR_sfxii_store, B_UID(899) };
+static const struct builtin B5_vec_stvebx = { { &T_vec_s8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:5", "*stvebx", CODE_FOR_sfxii_store, B_UID(900) };
+static const struct builtin B4_vec_stvehx = { { &T_vec_u16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:4", "*stvehx", CODE_FOR_sfxii_store, B_UID(901) };
+static const struct builtin B8_vec_stvewx = { { &T_vec_u32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:8", "*stvewx", CODE_FOR_sfxii_store, B_UID(902) };
+static const struct builtin B9_vec_stvewx = { { &T_vec_u32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:9", "*stvewx", CODE_FOR_sfxii_store, B_UID(903) };
+static const struct builtin B6_vec_stvebx = { { &T_vec_u8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:6", "*stvebx", CODE_FOR_sfxii_store, B_UID(904) };
+static const struct builtin B1_vec_stvxl = { { &T_vec_b16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:1", "*stvxl", CODE_FOR_sfxii_store, B_UID(905) };
+static const struct builtin B2_vec_stvxl = { { &T_vec_b16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:2", "*stvxl", CODE_FOR_sfxii_store, B_UID(906) };
+static const struct builtin B3_vec_stvxl = { { &T_vec_b16, &T_int, &T_vec_b16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:3", "*stvxl", CODE_FOR_sfxii_store, B_UID(907) };
+static const struct builtin B4_vec_stvxl = { { &T_vec_b32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:4", "*stvxl", CODE_FOR_sfxii_store, B_UID(908) };
+static const struct builtin B5_vec_stvxl = { { &T_vec_b32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:5", "*stvxl", CODE_FOR_sfxii_store, B_UID(909) };
+static const struct builtin B6_vec_stvxl = { { &T_vec_b32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:6", "*stvxl", CODE_FOR_sfxii_store, B_UID(910) };
+static const struct builtin B7_vec_stvxl = { { &T_vec_b32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:7", "*stvxl", CODE_FOR_sfxii_store, B_UID(911) };
+static const struct builtin B8_vec_stvxl = { { &T_vec_b32, &T_int, &T_vec_b32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:8", "*stvxl", CODE_FOR_sfxii_store, B_UID(912) };
+static const struct builtin B9_vec_stvxl = { { &T_vec_b8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:9", "*stvxl", CODE_FOR_sfxii_store, B_UID(913) };
+static const struct builtin B10_vec_stvxl = { { &T_vec_b8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:10", "*stvxl", CODE_FOR_sfxii_store, B_UID(914) };
+static const struct builtin B11_vec_stvxl = { { &T_vec_b8, &T_int, &T_vec_b8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:11", "*stvxl", CODE_FOR_sfxii_store, B_UID(915) };
+static const struct builtin B12_vec_stvxl = { { &T_vec_f32, &T_int, &T_float_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:12", "*stvxl", CODE_FOR_sfxii_store, B_UID(916) };
+static const struct builtin B13_vec_stvxl = { { &T_vec_f32, &T_int, &T_vec_f32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:13", "*stvxl", CODE_FOR_sfxii_store, B_UID(917) };
+static const struct builtin B14_vec_stvxl = { { &T_vec_p16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:14", "*stvxl", CODE_FOR_sfxii_store, B_UID(918) };
+static const struct builtin B15_vec_stvxl = { { &T_vec_p16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:15", "*stvxl", CODE_FOR_sfxii_store, B_UID(919) };
+static const struct builtin B16_vec_stvxl = { { &T_vec_p16, &T_int, &T_vec_p16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:16", "*stvxl", CODE_FOR_sfxii_store, B_UID(920) };
+static const struct builtin B17_vec_stvxl = { { &T_vec_s16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:17", "*stvxl", CODE_FOR_sfxii_store, B_UID(921) };
+static const struct builtin B18_vec_stvxl = { { &T_vec_s16, &T_int, &T_vec_s16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:18", "*stvxl", CODE_FOR_sfxii_store, B_UID(922) };
+static const struct builtin B19_vec_stvxl = { { &T_vec_s32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:19", "*stvxl", CODE_FOR_sfxii_store, B_UID(923) };
+static const struct builtin B20_vec_stvxl = { { &T_vec_s32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:20", "*stvxl", CODE_FOR_sfxii_store, B_UID(924) };
+static const struct builtin B21_vec_stvxl = { { &T_vec_s32, &T_int, &T_vec_s32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:21", "*stvxl", CODE_FOR_sfxii_store, B_UID(925) };
+static const struct builtin B22_vec_stvxl = { { &T_vec_s8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:22", "*stvxl", CODE_FOR_sfxii_store, B_UID(926) };
+static const struct builtin B23_vec_stvxl = { { &T_vec_s8, &T_int, &T_vec_s8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:23", "*stvxl", CODE_FOR_sfxii_store, B_UID(927) };
+static const struct builtin B24_vec_stvxl = { { &T_vec_u16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:24", "*stvxl", CODE_FOR_sfxii_store, B_UID(928) };
+static const struct builtin B25_vec_stvxl = { { &T_vec_u16, &T_int, &T_vec_u16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:25", "*stvxl", CODE_FOR_sfxii_store, B_UID(929) };
+static const struct builtin B26_vec_stvxl = { { &T_vec_u32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:26", "*stvxl", CODE_FOR_sfxii_store, B_UID(930) };
+static const struct builtin B27_vec_stvxl = { { &T_vec_u32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:27", "*stvxl", CODE_FOR_sfxii_store, B_UID(931) };
+static const struct builtin B28_vec_stvxl = { { &T_vec_u32, &T_int, &T_vec_u32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:28", "*stvxl", CODE_FOR_sfxii_store, B_UID(932) };
+static const struct builtin B29_vec_stvxl = { { &T_vec_u8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:29", "*stvxl", CODE_FOR_sfxii_store, B_UID(933) };
+static const struct builtin B30_vec_stvxl = { { &T_vec_u8, &T_int, &T_vec_u8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:30", "*stvxl", CODE_FOR_sfxii_store, B_UID(934) };
+static const struct builtin B1_vec_vsubuhm = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubuhm:1", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(935) };
+static const struct builtin B2_vec_vsubuhm = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhm:2", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(936) };
+static const struct builtin B1_vec_vsubuwm = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubuwm:1", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(937) };
+static const struct builtin B2_vec_vsubuwm = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuwm:2", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(938) };
+static const struct builtin B1_vec_vsububm = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsububm:1", "*vsububm", CODE_FOR_xfxx_simple, B_UID(939) };
+static const struct builtin B2_vec_vsububm = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububm:2", "*vsububm", CODE_FOR_xfxx_simple, B_UID(940) };
+static const struct builtin B_vec_vsubfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vsubfp", "*vsubfp", CODE_FOR_xfxx_fp, B_UID(941) };
+static const struct builtin B3_vec_vsubuhm = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubuhm:3", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(942) };
+static const struct builtin B4_vec_vsubuhm = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubuhm:4", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(943) };
+static const struct builtin B3_vec_vsubuwm = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubuwm:3", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(944) };
+static const struct builtin B4_vec_vsubuwm = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubuwm:4", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(945) };
+static const struct builtin B3_vec_vsububm = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsububm:3", "*vsububm", CODE_FOR_xfxx_simple, B_UID(946) };
+static const struct builtin B4_vec_vsububm = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsububm:4", "*vsububm", CODE_FOR_xfxx_simple, B_UID(947) };
+static const struct builtin B5_vec_vsubuhm = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhm:5", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(948) };
+static const struct builtin B6_vec_vsubuhm = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhm:6", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(949) };
+static const struct builtin B5_vec_vsubuwm = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuwm:5", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(950) };
+static const struct builtin B6_vec_vsubuwm = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuwm:6", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(951) };
+static const struct builtin B5_vec_vsububm = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububm:5", "*vsububm", CODE_FOR_xfxx_simple, B_UID(952) };
+static const struct builtin B6_vec_vsububm = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububm:6", "*vsububm", CODE_FOR_xfxx_simple, B_UID(953) };
+static const struct builtin B_vec_vsubcuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsubcuw", "*vsubcuw", CODE_FOR_xfxx_simple, B_UID(954) };
+static const struct builtin B1_vec_vsubshs = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubshs:1", "*vsubshs", CODE_FOR_xfxx_simple, B_UID(955) };
+static const struct builtin B1_vec_vsubuhs = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhs:1", "*vsubuhs", CODE_FOR_xfxx_simple, B_UID(956) };
+static const struct builtin B1_vec_vsubsws = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubsws:1", "*vsubsws", CODE_FOR_xfxx_simple, B_UID(957) };
+static const struct builtin B1_vec_vsubuws = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuws:1", "*vsubuws", CODE_FOR_xfxx_simple, B_UID(958) };
+static const struct builtin B1_vec_vsubsbs = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsubsbs:1", "*vsubsbs", CODE_FOR_xfxx_simple, B_UID(959) };
+static const struct builtin B1_vec_vsububs = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububs:1", "*vsububs", CODE_FOR_xfxx_simple, B_UID(960) };
+static const struct builtin B2_vec_vsubshs = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubshs:2", "*vsubshs", CODE_FOR_xfxx_simple, B_UID(961) };
+static const struct builtin B3_vec_vsubshs = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubshs:3", "*vsubshs", CODE_FOR_xfxx_simple, B_UID(962) };
+static const struct builtin B2_vec_vsubsws = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubsws:2", "*vsubsws", CODE_FOR_xfxx_simple, B_UID(963) };
+static const struct builtin B3_vec_vsubsws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubsws:3", "*vsubsws", CODE_FOR_xfxx_simple, B_UID(964) };
+static const struct builtin B2_vec_vsubsbs = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsubsbs:2", "*vsubsbs", CODE_FOR_xfxx_simple, B_UID(965) };
+static const struct builtin B3_vec_vsubsbs = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsubsbs:3", "*vsubsbs", CODE_FOR_xfxx_simple, B_UID(966) };
+static const struct builtin B2_vec_vsubuhs = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhs:2", "*vsubuhs", CODE_FOR_xfxx_simple, B_UID(967) };
+static const struct builtin B3_vec_vsubuhs = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhs:3", "*vsubuhs", CODE_FOR_xfxx_simple, B_UID(968) };
+static const struct builtin B2_vec_vsubuws = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuws:2", "*vsubuws", CODE_FOR_xfxx_simple, B_UID(969) };
+static const struct builtin B3_vec_vsubuws = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuws:3", "*vsubuws", CODE_FOR_xfxx_simple, B_UID(970) };
+static const struct builtin B2_vec_vsububs = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububs:2", "*vsububs", CODE_FOR_xfxx_simple, B_UID(971) };
+static const struct builtin B3_vec_vsububs = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububs:3", "*vsububs", CODE_FOR_xfxx_simple, B_UID(972) };
+static const struct builtin B_vec_vsum2sws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsum2sws", "*vsum2sws", CODE_FOR_xfxx_complex, B_UID(973) };
+static const struct builtin B_vec_vsum4shs = { { &T_vec_s16, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsum4shs", "*vsum4shs", CODE_FOR_xfxx_complex, B_UID(974) };
+static const struct builtin B_vec_vsum4sbs = { { &T_vec_s8, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsum4sbs", "*vsum4sbs", CODE_FOR_xfxx_complex, B_UID(975) };
+static const struct builtin B_vec_vsum4ubs = { { &T_vec_u8, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsum4ubs", "*vsum4ubs", CODE_FOR_xfxx_complex, B_UID(976) };
+static const struct builtin B_vec_vsumsws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsumsws", "*vsumsws", CODE_FOR_xfxx_complex, B_UID(977) };
+static const struct builtin B_vec_vrfiz = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfiz", "*vrfiz", CODE_FOR_xfx_fp, B_UID(978) };
+static const struct builtin B1_vec_unpack2sh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_unpack2sh:1", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(979) };
+static const struct builtin B2_vec_unpack2sh = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_unpack2sh:2", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(980) };
+static const struct builtin B1_vec_unpack2sl = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_unpack2sl:1", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(981) };
+static const struct builtin B2_vec_unpack2sl = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_unpack2sl:2", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(982) };
+static const struct builtin B1_vec_unpack2uh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_unpack2uh:1", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(983) };
+static const struct builtin B2_vec_unpack2uh = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_unpack2uh:2", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(984) };
+static const struct builtin B1_vec_unpack2ul = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_unpack2ul:1", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(985) };
+static const struct builtin B2_vec_unpack2ul = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_unpack2ul:2", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(986) };
+static const struct builtin B1_vec_vupkhsh = { { &T_vec_b16, NULL, NULL, }, "x", &T_vec_b32, 1, FALSE, FALSE, 0, "vec_vupkhsh:1", "*vupkhsh", CODE_FOR_xfx_perm, B_UID(987) };
+static const struct builtin B1_vec_vupkhsb = { { &T_vec_b8, NULL, NULL, }, "x", &T_vec_b16, 1, FALSE, FALSE, 0, "vec_vupkhsb:1", "*vupkhsb", CODE_FOR_xfx_perm, B_UID(988) };
+static const struct builtin B_vec_vupkhpx = { { &T_vec_p16, NULL, NULL, }, "x", &T_vec_u32, 1, FALSE, FALSE, 0, "vec_vupkhpx", "*vupkhpx", CODE_FOR_xfx_perm, B_UID(989) };
+static const struct builtin B2_vec_vupkhsh = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 0, "vec_vupkhsh:2", "*vupkhsh", CODE_FOR_xfx_perm, B_UID(990) };
+static const struct builtin B2_vec_vupkhsb = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 0, "vec_vupkhsb:2", "*vupkhsb", CODE_FOR_xfx_perm, B_UID(991) };
+static const struct builtin B1_vec_vupklsh = { { &T_vec_b16, NULL, NULL, }, "x", &T_vec_b32, 1, FALSE, FALSE, 0, "vec_vupklsh:1", "*vupklsh", CODE_FOR_xfx_perm, B_UID(992) };
+static const struct builtin B1_vec_vupklsb = { { &T_vec_b8, NULL, NULL, }, "x", &T_vec_b16, 1, FALSE, FALSE, 0, "vec_vupklsb:1", "*vupklsb", CODE_FOR_xfx_perm, B_UID(993) };
+static const struct builtin B_vec_vupklpx = { { &T_vec_p16, NULL, NULL, }, "x", &T_vec_u32, 1, FALSE, FALSE, 0, "vec_vupklpx", "*vupklpx", CODE_FOR_xfx_perm, B_UID(994) };
+static const struct builtin B2_vec_vupklsh = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 0, "vec_vupklsh:2", "*vupklsh", CODE_FOR_xfx_perm, B_UID(995) };
+static const struct builtin B2_vec_vupklsb = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 0, "vec_vupklsb:2", "*vupklsb", CODE_FOR_xfx_perm, B_UID(996) };
+static const struct builtin B1_vec_vxor = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 1, "vec_vxor:1", "*vxor", CODE_FOR_xfxx_simple, B_UID(997) };
+static const struct builtin B2_vec_vxor = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vxor:2", "*vxor", CODE_FOR_xfxx_simple, B_UID(998) };
+static const struct builtin B3_vec_vxor = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vxor:3", "*vxor", CODE_FOR_xfxx_simple, B_UID(999) };
+static const struct builtin B4_vec_vxor = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 1, "vec_vxor:4", "*vxor", CODE_FOR_xfxx_simple, B_UID(1000) };
+static const struct builtin B5_vec_vxor = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vxor:5", "*vxor", CODE_FOR_xfxx_simple, B_UID(1001) };
+static const struct builtin B6_vec_vxor = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vxor:6", "*vxor", CODE_FOR_xfxx_simple, B_UID(1002) };
+static const struct builtin B7_vec_vxor = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vxor:7", "*vxor", CODE_FOR_xfxx_simple, B_UID(1003) };
+static const struct builtin B8_vec_vxor = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 1, "vec_vxor:8", "*vxor", CODE_FOR_xfxx_simple, B_UID(1004) };
+static const struct builtin B9_vec_vxor = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vxor:9", "*vxor", CODE_FOR_xfxx_simple, B_UID(1005) };
+static const struct builtin B10_vec_vxor = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vxor:10", "*vxor", CODE_FOR_xfxx_simple, B_UID(1006) };
+static const struct builtin B11_vec_vxor = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vxor:11", "*vxor", CODE_FOR_xfxx_simple, B_UID(1007) };
+static const struct builtin B12_vec_vxor = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vxor:12", "*vxor", CODE_FOR_xfxx_simple, B_UID(1008) };
+static const struct builtin B13_vec_vxor = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vxor:13", "*vxor", CODE_FOR_xfxx_simple, B_UID(1009) };
+static const struct builtin B14_vec_vxor = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vxor:14", "*vxor", CODE_FOR_xfxx_simple, B_UID(1010) };
+static const struct builtin B15_vec_vxor = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vxor:15", "*vxor", CODE_FOR_xfxx_simple, B_UID(1011) };
+static const struct builtin B16_vec_vxor = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vxor:16", "*vxor", CODE_FOR_xfxx_simple, B_UID(1012) };
+static const struct builtin B17_vec_vxor = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vxor:17", "*vxor", CODE_FOR_xfxx_simple, B_UID(1013) };
+static const struct builtin B18_vec_vxor = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vxor:18", "*vxor", CODE_FOR_xfxx_simple, B_UID(1014) };
+static const struct builtin B19_vec_vxor = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vxor:19", "*vxor", CODE_FOR_xfxx_simple, B_UID(1015) };
+static const struct builtin B20_vec_vxor = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vxor:20", "*vxor", CODE_FOR_xfxx_simple, B_UID(1016) };
+static const struct builtin B21_vec_vxor = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vxor:21", "*vxor", CODE_FOR_xfxx_simple, B_UID(1017) };
+static const struct builtin B22_vec_vxor = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vxor:22", "*vxor", CODE_FOR_xfxx_simple, B_UID(1018) };
+static const struct builtin B23_vec_vxor = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vxor:23", "*vxor", CODE_FOR_xfxx_simple, B_UID(1019) };
+static const struct builtin B24_vec_vxor = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vxor:24", "*vxor", CODE_FOR_xfxx_simple, B_UID(1020) };
+#define LAST_B_UID B_UID(1021)
+
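The initializers above are machine-generated (by the ops-to-gp script from the builtin.ops/vec.ops tables), so every record follows one fixed shape. As a reading aid only, here is a minimal, hypothetical sketch of the record layout those initializers imply; the field names below are assumptions for illustration, not the declaration the generated header actually uses.

struct type;                            /* opaque stand-in for T_vec_s8 etc. */

struct builtin_sketch
{
  const struct type *args[3];           /* up to three argument types; unused
                                           slots are NULL */
  const char *arg_spec;                 /* "xx" = two vector operands, "xii" =
                                           vector + offset + pointer, "A"/"xB"
                                           involve an immediate operand */
  const struct type *result;            /* result type; &T_void for stores */
  int n_args;                           /* arity; matches non-NULL args[] */
  int flag1, flag2;                     /* the two FALSE/FALSE fields (roles
                                           assumed here) */
  int special;                          /* small per-builtin constant */
  const char *name;                     /* "vec_vspltb:2" -- the ":2" tags
                                           overload number 2 of vec_vspltb */
  const char *insn;                     /* "*vspltb" -- instruction emitted */
  int icode;                            /* CODE_FOR_... insn code */
  int uid;                              /* B_UID(...) unique id, dense up to
                                           LAST_B_UID */
};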
+const struct builtin * const Builtin[] = {
+ &B1_vec_abs,
+ &B2_vec_abs,
+ &B3_vec_abs,
+ &B4_vec_abs,
+ &B1_vec_abss,
+ &B2_vec_abss,
+ &B3_vec_abss,
+ &B1_vec_vadduhm,
+ &B2_vec_vadduhm,
+ &B1_vec_vadduwm,
+ &B2_vec_vadduwm,
+ &B1_vec_vaddubm,
+ &B2_vec_vaddubm,
+ &B_vec_vaddfp,
+ &B3_vec_vadduhm,
+ &B4_vec_vadduhm,
+ &B3_vec_vadduwm,
+ &B4_vec_vadduwm,
+ &B3_vec_vaddubm,
+ &B4_vec_vaddubm,
+ &B5_vec_vadduhm,
+ &B6_vec_vadduhm,
+ &B5_vec_vadduwm,
+ &B6_vec_vadduwm,
+ &B5_vec_vaddubm,
+ &B6_vec_vaddubm,
+ &B_vec_vaddcuw,
+ &B1_vec_vaddshs,
+ &B1_vec_vadduhs,
+ &B1_vec_vaddsws,
+ &B1_vec_vadduws,
+ &B1_vec_vaddsbs,
+ &B1_vec_vaddubs,
+ &B2_vec_vaddshs,
+ &B3_vec_vaddshs,
+ &B2_vec_vaddsws,
+ &B3_vec_vaddsws,
+ &B2_vec_vaddsbs,
+ &B3_vec_vaddsbs,
+ &B2_vec_vadduhs,
+ &B3_vec_vadduhs,
+ &B2_vec_vadduws,
+ &B3_vec_vadduws,
+ &B2_vec_vaddubs,
+ &B3_vec_vaddubs,
+ &B1_vec_all_eq,
+ &B2_vec_all_eq,
+ &B3_vec_all_eq,
+ &B4_vec_all_eq,
+ &B5_vec_all_eq,
+ &B6_vec_all_eq,
+ &B7_vec_all_eq,
+ &B8_vec_all_eq,
+ &B9_vec_all_eq,
+ &B10_vec_all_eq,
+ &B11_vec_all_eq,
+ &B12_vec_all_eq,
+ &B13_vec_all_eq,
+ &B14_vec_all_eq,
+ &B15_vec_all_eq,
+ &B16_vec_all_eq,
+ &B17_vec_all_eq,
+ &B18_vec_all_eq,
+ &B19_vec_all_eq,
+ &B20_vec_all_eq,
+ &B21_vec_all_eq,
+ &B22_vec_all_eq,
+ &B23_vec_all_eq,
+ &B1_vec_all_ge,
+ &B2_vec_all_ge,
+ &B3_vec_all_ge,
+ &B4_vec_all_ge,
+ &B5_vec_all_ge,
+ &B6_vec_all_ge,
+ &B7_vec_all_ge,
+ &B8_vec_all_ge,
+ &B9_vec_all_ge,
+ &B10_vec_all_ge,
+ &B11_vec_all_ge,
+ &B12_vec_all_ge,
+ &B13_vec_all_ge,
+ &B14_vec_all_ge,
+ &B15_vec_all_ge,
+ &B16_vec_all_ge,
+ &B17_vec_all_ge,
+ &B18_vec_all_ge,
+ &B19_vec_all_ge,
+ &B1_vec_all_gt,
+ &B2_vec_all_gt,
+ &B3_vec_all_gt,
+ &B4_vec_all_gt,
+ &B5_vec_all_gt,
+ &B6_vec_all_gt,
+ &B7_vec_all_gt,
+ &B8_vec_all_gt,
+ &B9_vec_all_gt,
+ &B10_vec_all_gt,
+ &B11_vec_all_gt,
+ &B12_vec_all_gt,
+ &B13_vec_all_gt,
+ &B14_vec_all_gt,
+ &B15_vec_all_gt,
+ &B16_vec_all_gt,
+ &B17_vec_all_gt,
+ &B18_vec_all_gt,
+ &B19_vec_all_gt,
+ &B_vec_all_in,
+ &B1_vec_all_le,
+ &B2_vec_all_le,
+ &B3_vec_all_le,
+ &B4_vec_all_le,
+ &B5_vec_all_le,
+ &B6_vec_all_le,
+ &B7_vec_all_le,
+ &B8_vec_all_le,
+ &B9_vec_all_le,
+ &B10_vec_all_le,
+ &B11_vec_all_le,
+ &B12_vec_all_le,
+ &B13_vec_all_le,
+ &B14_vec_all_le,
+ &B15_vec_all_le,
+ &B16_vec_all_le,
+ &B17_vec_all_le,
+ &B18_vec_all_le,
+ &B19_vec_all_le,
+ &B1_vec_all_lt,
+ &B2_vec_all_lt,
+ &B3_vec_all_lt,
+ &B4_vec_all_lt,
+ &B5_vec_all_lt,
+ &B6_vec_all_lt,
+ &B7_vec_all_lt,
+ &B8_vec_all_lt,
+ &B9_vec_all_lt,
+ &B10_vec_all_lt,
+ &B11_vec_all_lt,
+ &B12_vec_all_lt,
+ &B13_vec_all_lt,
+ &B14_vec_all_lt,
+ &B15_vec_all_lt,
+ &B16_vec_all_lt,
+ &B17_vec_all_lt,
+ &B18_vec_all_lt,
+ &B19_vec_all_lt,
+ &B_vec_all_nan,
+ &B1_vec_all_ne,
+ &B2_vec_all_ne,
+ &B3_vec_all_ne,
+ &B4_vec_all_ne,
+ &B5_vec_all_ne,
+ &B6_vec_all_ne,
+ &B7_vec_all_ne,
+ &B8_vec_all_ne,
+ &B9_vec_all_ne,
+ &B10_vec_all_ne,
+ &B11_vec_all_ne,
+ &B12_vec_all_ne,
+ &B13_vec_all_ne,
+ &B14_vec_all_ne,
+ &B15_vec_all_ne,
+ &B16_vec_all_ne,
+ &B17_vec_all_ne,
+ &B18_vec_all_ne,
+ &B19_vec_all_ne,
+ &B20_vec_all_ne,
+ &B21_vec_all_ne,
+ &B22_vec_all_ne,
+ &B23_vec_all_ne,
+ &B_vec_all_nge,
+ &B_vec_all_ngt,
+ &B_vec_all_nle,
+ &B_vec_all_nlt,
+ &B_vec_all_numeric,
+ &B1_vec_vand,
+ &B2_vec_vand,
+ &B3_vec_vand,
+ &B4_vec_vand,
+ &B5_vec_vand,
+ &B6_vec_vand,
+ &B7_vec_vand,
+ &B8_vec_vand,
+ &B9_vec_vand,
+ &B10_vec_vand,
+ &B11_vec_vand,
+ &B12_vec_vand,
+ &B13_vec_vand,
+ &B14_vec_vand,
+ &B15_vec_vand,
+ &B16_vec_vand,
+ &B17_vec_vand,
+ &B18_vec_vand,
+ &B19_vec_vand,
+ &B20_vec_vand,
+ &B21_vec_vand,
+ &B22_vec_vand,
+ &B23_vec_vand,
+ &B24_vec_vand,
+ &B1_vec_vandc,
+ &B2_vec_vandc,
+ &B3_vec_vandc,
+ &B4_vec_vandc,
+ &B5_vec_vandc,
+ &B6_vec_vandc,
+ &B7_vec_vandc,
+ &B8_vec_vandc,
+ &B9_vec_vandc,
+ &B10_vec_vandc,
+ &B11_vec_vandc,
+ &B12_vec_vandc,
+ &B13_vec_vandc,
+ &B14_vec_vandc,
+ &B15_vec_vandc,
+ &B16_vec_vandc,
+ &B17_vec_vandc,
+ &B18_vec_vandc,
+ &B19_vec_vandc,
+ &B20_vec_vandc,
+ &B21_vec_vandc,
+ &B22_vec_vandc,
+ &B23_vec_vandc,
+ &B24_vec_vandc,
+ &B1_vec_any_eq,
+ &B2_vec_any_eq,
+ &B3_vec_any_eq,
+ &B4_vec_any_eq,
+ &B5_vec_any_eq,
+ &B6_vec_any_eq,
+ &B7_vec_any_eq,
+ &B8_vec_any_eq,
+ &B9_vec_any_eq,
+ &B10_vec_any_eq,
+ &B11_vec_any_eq,
+ &B12_vec_any_eq,
+ &B13_vec_any_eq,
+ &B14_vec_any_eq,
+ &B15_vec_any_eq,
+ &B16_vec_any_eq,
+ &B17_vec_any_eq,
+ &B18_vec_any_eq,
+ &B19_vec_any_eq,
+ &B20_vec_any_eq,
+ &B21_vec_any_eq,
+ &B22_vec_any_eq,
+ &B23_vec_any_eq,
+ &B1_vec_any_ge,
+ &B2_vec_any_ge,
+ &B3_vec_any_ge,
+ &B4_vec_any_ge,
+ &B5_vec_any_ge,
+ &B6_vec_any_ge,
+ &B7_vec_any_ge,
+ &B8_vec_any_ge,
+ &B9_vec_any_ge,
+ &B10_vec_any_ge,
+ &B11_vec_any_ge,
+ &B12_vec_any_ge,
+ &B13_vec_any_ge,
+ &B14_vec_any_ge,
+ &B15_vec_any_ge,
+ &B16_vec_any_ge,
+ &B17_vec_any_ge,
+ &B18_vec_any_ge,
+ &B19_vec_any_ge,
+ &B1_vec_any_gt,
+ &B2_vec_any_gt,
+ &B3_vec_any_gt,
+ &B4_vec_any_gt,
+ &B5_vec_any_gt,
+ &B6_vec_any_gt,
+ &B7_vec_any_gt,
+ &B8_vec_any_gt,
+ &B9_vec_any_gt,
+ &B10_vec_any_gt,
+ &B11_vec_any_gt,
+ &B12_vec_any_gt,
+ &B13_vec_any_gt,
+ &B14_vec_any_gt,
+ &B15_vec_any_gt,
+ &B16_vec_any_gt,
+ &B17_vec_any_gt,
+ &B18_vec_any_gt,
+ &B19_vec_any_gt,
+ &B1_vec_any_le,
+ &B2_vec_any_le,
+ &B3_vec_any_le,
+ &B4_vec_any_le,
+ &B5_vec_any_le,
+ &B6_vec_any_le,
+ &B7_vec_any_le,
+ &B8_vec_any_le,
+ &B9_vec_any_le,
+ &B10_vec_any_le,
+ &B11_vec_any_le,
+ &B12_vec_any_le,
+ &B13_vec_any_le,
+ &B14_vec_any_le,
+ &B15_vec_any_le,
+ &B16_vec_any_le,
+ &B17_vec_any_le,
+ &B18_vec_any_le,
+ &B19_vec_any_le,
+ &B1_vec_any_lt,
+ &B2_vec_any_lt,
+ &B3_vec_any_lt,
+ &B4_vec_any_lt,
+ &B5_vec_any_lt,
+ &B6_vec_any_lt,
+ &B7_vec_any_lt,
+ &B8_vec_any_lt,
+ &B9_vec_any_lt,
+ &B10_vec_any_lt,
+ &B11_vec_any_lt,
+ &B12_vec_any_lt,
+ &B13_vec_any_lt,
+ &B14_vec_any_lt,
+ &B15_vec_any_lt,
+ &B16_vec_any_lt,
+ &B17_vec_any_lt,
+ &B18_vec_any_lt,
+ &B19_vec_any_lt,
+ &B_vec_any_nan,
+ &B1_vec_any_ne,
+ &B2_vec_any_ne,
+ &B3_vec_any_ne,
+ &B4_vec_any_ne,
+ &B5_vec_any_ne,
+ &B6_vec_any_ne,
+ &B7_vec_any_ne,
+ &B8_vec_any_ne,
+ &B9_vec_any_ne,
+ &B10_vec_any_ne,
+ &B11_vec_any_ne,
+ &B12_vec_any_ne,
+ &B13_vec_any_ne,
+ &B14_vec_any_ne,
+ &B15_vec_any_ne,
+ &B16_vec_any_ne,
+ &B17_vec_any_ne,
+ &B18_vec_any_ne,
+ &B19_vec_any_ne,
+ &B20_vec_any_ne,
+ &B21_vec_any_ne,
+ &B22_vec_any_ne,
+ &B23_vec_any_ne,
+ &B_vec_any_nge,
+ &B_vec_any_ngt,
+ &B_vec_any_nle,
+ &B_vec_any_nlt,
+ &B_vec_any_numeric,
+ &B_vec_any_out,
+ &B_vec_vavgsh,
+ &B_vec_vavgsw,
+ &B_vec_vavgsb,
+ &B_vec_vavguh,
+ &B_vec_vavguw,
+ &B_vec_vavgub,
+ &B_vec_vrfip,
+ &B_vec_vcmpbfp,
+ &B_vec_vcmpeqfp,
+ &B1_vec_vcmpequh,
+ &B1_vec_vcmpequw,
+ &B1_vec_vcmpequb,
+ &B2_vec_vcmpequh,
+ &B2_vec_vcmpequw,
+ &B2_vec_vcmpequb,
+ &B_vec_vcmpgefp,
+ &B_vec_vcmpgtfp,
+ &B_vec_vcmpgtsh,
+ &B_vec_vcmpgtsw,
+ &B_vec_vcmpgtsb,
+ &B_vec_vcmpgtuh,
+ &B_vec_vcmpgtuw,
+ &B_vec_vcmpgtub,
+ &B_vec_cmple,
+ &B1_vec_cmplt,
+ &B2_vec_cmplt,
+ &B3_vec_cmplt,
+ &B4_vec_cmplt,
+ &B5_vec_cmplt,
+ &B6_vec_cmplt,
+ &B7_vec_cmplt,
+ &B_vec_vcfsx,
+ &B_vec_vcfux,
+ &B_vec_vctsxs,
+ &B_vec_vctuxs,
+ &B_vec_dss,
+ &B_vec_dssall,
+ &B1_vec_dst,
+ &B2_vec_dst,
+ &B3_vec_dst,
+ &B4_vec_dst,
+ &B5_vec_dst,
+ &B6_vec_dst,
+ &B7_vec_dst,
+ &B8_vec_dst,
+ &B9_vec_dst,
+ &B10_vec_dst,
+ &B11_vec_dst,
+ &B12_vec_dst,
+ &B13_vec_dst,
+ &B14_vec_dst,
+ &B15_vec_dst,
+ &B16_vec_dst,
+ &B17_vec_dst,
+ &B18_vec_dst,
+ &B19_vec_dst,
+ &B20_vec_dst,
+ &B1_vec_dstst,
+ &B2_vec_dstst,
+ &B3_vec_dstst,
+ &B4_vec_dstst,
+ &B5_vec_dstst,
+ &B6_vec_dstst,
+ &B7_vec_dstst,
+ &B8_vec_dstst,
+ &B9_vec_dstst,
+ &B10_vec_dstst,
+ &B11_vec_dstst,
+ &B12_vec_dstst,
+ &B13_vec_dstst,
+ &B14_vec_dstst,
+ &B15_vec_dstst,
+ &B16_vec_dstst,
+ &B17_vec_dstst,
+ &B18_vec_dstst,
+ &B19_vec_dstst,
+ &B20_vec_dstst,
+ &B1_vec_dststt,
+ &B2_vec_dststt,
+ &B3_vec_dststt,
+ &B4_vec_dststt,
+ &B5_vec_dststt,
+ &B6_vec_dststt,
+ &B7_vec_dststt,
+ &B8_vec_dststt,
+ &B9_vec_dststt,
+ &B10_vec_dststt,
+ &B11_vec_dststt,
+ &B12_vec_dststt,
+ &B13_vec_dststt,
+ &B14_vec_dststt,
+ &B15_vec_dststt,
+ &B16_vec_dststt,
+ &B17_vec_dststt,
+ &B18_vec_dststt,
+ &B19_vec_dststt,
+ &B20_vec_dststt,
+ &B1_vec_dstt,
+ &B2_vec_dstt,
+ &B3_vec_dstt,
+ &B4_vec_dstt,
+ &B5_vec_dstt,
+ &B6_vec_dstt,
+ &B7_vec_dstt,
+ &B8_vec_dstt,
+ &B9_vec_dstt,
+ &B10_vec_dstt,
+ &B11_vec_dstt,
+ &B12_vec_dstt,
+ &B13_vec_dstt,
+ &B14_vec_dstt,
+ &B15_vec_dstt,
+ &B16_vec_dstt,
+ &B17_vec_dstt,
+ &B18_vec_dstt,
+ &B19_vec_dstt,
+ &B20_vec_dstt,
+ &B_vec_vexptefp,
+ &B_vec_vrfim,
+ &B1_vec_lvx,
+ &B2_vec_lvx,
+ &B3_vec_lvx,
+ &B4_vec_lvx,
+ &B5_vec_lvx,
+ &B6_vec_lvx,
+ &B7_vec_lvx,
+ &B8_vec_lvx,
+ &B9_vec_lvx,
+ &B10_vec_lvx,
+ &B11_vec_lvx,
+ &B12_vec_lvx,
+ &B13_vec_lvx,
+ &B14_vec_lvx,
+ &B15_vec_lvx,
+ &B16_vec_lvx,
+ &B17_vec_lvx,
+ &B18_vec_lvx,
+ &B19_vec_lvx,
+ &B20_vec_lvx,
+ &B1_vec_lvewx,
+ &B2_vec_lvewx,
+ &B3_vec_lvewx,
+ &B1_vec_lvehx,
+ &B1_vec_lvebx,
+ &B2_vec_lvebx,
+ &B4_vec_lvewx,
+ &B5_vec_lvewx,
+ &B2_vec_lvehx,
+ &B1_vec_lvxl,
+ &B2_vec_lvxl,
+ &B3_vec_lvxl,
+ &B4_vec_lvxl,
+ &B5_vec_lvxl,
+ &B6_vec_lvxl,
+ &B7_vec_lvxl,
+ &B8_vec_lvxl,
+ &B9_vec_lvxl,
+ &B10_vec_lvxl,
+ &B11_vec_lvxl,
+ &B12_vec_lvxl,
+ &B13_vec_lvxl,
+ &B14_vec_lvxl,
+ &B15_vec_lvxl,
+ &B16_vec_lvxl,
+ &B17_vec_lvxl,
+ &B18_vec_lvxl,
+ &B19_vec_lvxl,
+ &B20_vec_lvxl,
+ &B_vec_vlogefp,
+ &B1_vec_lvsl,
+ &B2_vec_lvsl,
+ &B3_vec_lvsl,
+ &B4_vec_lvsl,
+ &B5_vec_lvsl,
+ &B6_vec_lvsl,
+ &B7_vec_lvsl,
+ &B8_vec_lvsl,
+ &B9_vec_lvsl,
+ &B1_vec_lvsr,
+ &B2_vec_lvsr,
+ &B3_vec_lvsr,
+ &B4_vec_lvsr,
+ &B5_vec_lvsr,
+ &B6_vec_lvsr,
+ &B7_vec_lvsr,
+ &B8_vec_lvsr,
+ &B9_vec_lvsr,
+ &B_vec_vmaddfp,
+ &B_vec_vmhaddshs,
+ &B1_vec_vmaxsh,
+ &B1_vec_vmaxuh,
+ &B1_vec_vmaxsw,
+ &B1_vec_vmaxuw,
+ &B1_vec_vmaxsb,
+ &B1_vec_vmaxub,
+ &B_vec_vmaxfp,
+ &B2_vec_vmaxsh,
+ &B3_vec_vmaxsh,
+ &B2_vec_vmaxsw,
+ &B3_vec_vmaxsw,
+ &B2_vec_vmaxsb,
+ &B3_vec_vmaxsb,
+ &B2_vec_vmaxuh,
+ &B3_vec_vmaxuh,
+ &B2_vec_vmaxuw,
+ &B3_vec_vmaxuw,
+ &B2_vec_vmaxub,
+ &B3_vec_vmaxub,
+ &B1_vec_vmrghh,
+ &B1_vec_vmrghw,
+ &B1_vec_vmrghb,
+ &B2_vec_vmrghw,
+ &B2_vec_vmrghh,
+ &B3_vec_vmrghh,
+ &B3_vec_vmrghw,
+ &B2_vec_vmrghb,
+ &B4_vec_vmrghh,
+ &B4_vec_vmrghw,
+ &B3_vec_vmrghb,
+ &B1_vec_vmrglh,
+ &B1_vec_vmrglw,
+ &B1_vec_vmrglb,
+ &B2_vec_vmrglw,
+ &B2_vec_vmrglh,
+ &B3_vec_vmrglh,
+ &B3_vec_vmrglw,
+ &B2_vec_vmrglb,
+ &B4_vec_vmrglh,
+ &B4_vec_vmrglw,
+ &B3_vec_vmrglb,
+ &B_vec_mfvscr,
+ &B1_vec_vminsh,
+ &B1_vec_vminuh,
+ &B1_vec_vminsw,
+ &B1_vec_vminuw,
+ &B1_vec_vminsb,
+ &B1_vec_vminub,
+ &B_vec_vminfp,
+ &B2_vec_vminsh,
+ &B3_vec_vminsh,
+ &B2_vec_vminsw,
+ &B3_vec_vminsw,
+ &B2_vec_vminsb,
+ &B3_vec_vminsb,
+ &B2_vec_vminuh,
+ &B3_vec_vminuh,
+ &B2_vec_vminuw,
+ &B3_vec_vminuw,
+ &B2_vec_vminub,
+ &B3_vec_vminub,
+ &B1_vec_vmladduhm,
+ &B2_vec_vmladduhm,
+ &B3_vec_vmladduhm,
+ &B4_vec_vmladduhm,
+ &B_vec_vmhraddshs,
+ &B_vec_vmsumshm,
+ &B_vec_vmsummbm,
+ &B_vec_vmsumuhm,
+ &B_vec_vmsumubm,
+ &B_vec_vmsumshs,
+ &B_vec_vmsumuhs,
+ &B1_vec_mtvscr,
+ &B2_vec_mtvscr,
+ &B3_vec_mtvscr,
+ &B4_vec_mtvscr,
+ &B5_vec_mtvscr,
+ &B6_vec_mtvscr,
+ &B7_vec_mtvscr,
+ &B8_vec_mtvscr,
+ &B9_vec_mtvscr,
+ &B10_vec_mtvscr,
+ &B_vec_vmulesh,
+ &B_vec_vmulesb,
+ &B_vec_vmuleuh,
+ &B_vec_vmuleub,
+ &B_vec_vmulosh,
+ &B_vec_vmulosb,
+ &B_vec_vmulouh,
+ &B_vec_vmuloub,
+ &B_vec_vnmsubfp,
+ &B1_vec_vnor,
+ &B2_vec_vnor,
+ &B3_vec_vnor,
+ &B4_vec_vnor,
+ &B5_vec_vnor,
+ &B6_vec_vnor,
+ &B7_vec_vnor,
+ &B8_vec_vnor,
+ &B9_vec_vnor,
+ &B10_vec_vnor,
+ &B1_vec_vor,
+ &B2_vec_vor,
+ &B3_vec_vor,
+ &B4_vec_vor,
+ &B5_vec_vor,
+ &B6_vec_vor,
+ &B7_vec_vor,
+ &B8_vec_vor,
+ &B9_vec_vor,
+ &B10_vec_vor,
+ &B11_vec_vor,
+ &B12_vec_vor,
+ &B13_vec_vor,
+ &B14_vec_vor,
+ &B15_vec_vor,
+ &B16_vec_vor,
+ &B17_vec_vor,
+ &B18_vec_vor,
+ &B19_vec_vor,
+ &B20_vec_vor,
+ &B21_vec_vor,
+ &B22_vec_vor,
+ &B23_vec_vor,
+ &B24_vec_vor,
+ &B1_vec_vpkuhum,
+ &B1_vec_vpkuwum,
+ &B2_vec_vpkuhum,
+ &B2_vec_vpkuwum,
+ &B3_vec_vpkuhum,
+ &B3_vec_vpkuwum,
+ &B_vec_vpkpx,
+ &B_vec_vpkshss,
+ &B_vec_vpkswss,
+ &B_vec_vpkuhus,
+ &B_vec_vpkuwus,
+ &B_vec_vpkshus,
+ &B_vec_vpkswus,
+ &B1_vec_vperm,
+ &B2_vec_vperm,
+ &B3_vec_vperm,
+ &B4_vec_vperm,
+ &B5_vec_vperm,
+ &B6_vec_vperm,
+ &B7_vec_vperm,
+ &B8_vec_vperm,
+ &B9_vec_vperm,
+ &B10_vec_vperm,
+ &B11_vec_vperm,
+ &B_vec_vrefp,
+ &B1_vec_vrlh,
+ &B1_vec_vrlw,
+ &B1_vec_vrlb,
+ &B2_vec_vrlh,
+ &B2_vec_vrlw,
+ &B2_vec_vrlb,
+ &B_vec_vrfin,
+ &B_vec_vrsqrtefp,
+ &B1_vec_vsel,
+ &B2_vec_vsel,
+ &B3_vec_vsel,
+ &B4_vec_vsel,
+ &B5_vec_vsel,
+ &B6_vec_vsel,
+ &B7_vec_vsel,
+ &B8_vec_vsel,
+ &B9_vec_vsel,
+ &B10_vec_vsel,
+ &B11_vec_vsel,
+ &B12_vec_vsel,
+ &B13_vec_vsel,
+ &B14_vec_vsel,
+ &B15_vec_vsel,
+ &B16_vec_vsel,
+ &B17_vec_vsel,
+ &B18_vec_vsel,
+ &B19_vec_vsel,
+ &B20_vec_vsel,
+ &B1_vec_vslh,
+ &B1_vec_vslw,
+ &B1_vec_vslb,
+ &B2_vec_vslh,
+ &B2_vec_vslw,
+ &B2_vec_vslb,
+ &B1_vec_vsldoi,
+ &B2_vec_vsldoi,
+ &B3_vec_vsldoi,
+ &B4_vec_vsldoi,
+ &B5_vec_vsldoi,
+ &B6_vec_vsldoi,
+ &B7_vec_vsldoi,
+ &B8_vec_vsldoi,
+ &B9_vec_vsldoi,
+ &B10_vec_vsldoi,
+ &B11_vec_vsldoi,
+ &B1_vec_vsl,
+ &B2_vec_vsl,
+ &B3_vec_vsl,
+ &B4_vec_vsl,
+ &B5_vec_vsl,
+ &B6_vec_vsl,
+ &B7_vec_vsl,
+ &B8_vec_vsl,
+ &B9_vec_vsl,
+ &B10_vec_vsl,
+ &B11_vec_vsl,
+ &B12_vec_vsl,
+ &B13_vec_vsl,
+ &B14_vec_vsl,
+ &B15_vec_vsl,
+ &B16_vec_vsl,
+ &B17_vec_vsl,
+ &B18_vec_vsl,
+ &B19_vec_vsl,
+ &B20_vec_vsl,
+ &B21_vec_vsl,
+ &B22_vec_vsl,
+ &B23_vec_vsl,
+ &B24_vec_vsl,
+ &B25_vec_vsl,
+ &B26_vec_vsl,
+ &B27_vec_vsl,
+ &B28_vec_vsl,
+ &B29_vec_vsl,
+ &B30_vec_vsl,
+ &B1_vec_vslo,
+ &B2_vec_vslo,
+ &B3_vec_vslo,
+ &B4_vec_vslo,
+ &B5_vec_vslo,
+ &B6_vec_vslo,
+ &B7_vec_vslo,
+ &B8_vec_vslo,
+ &B9_vec_vslo,
+ &B10_vec_vslo,
+ &B11_vec_vslo,
+ &B12_vec_vslo,
+ &B13_vec_vslo,
+ &B14_vec_vslo,
+ &B15_vec_vslo,
+ &B16_vec_vslo,
+ &B1_vec_vsplth,
+ &B1_vec_vspltw,
+ &B1_vec_vspltb,
+ &B2_vec_vspltw,
+ &B2_vec_vsplth,
+ &B3_vec_vsplth,
+ &B3_vec_vspltw,
+ &B2_vec_vspltb,
+ &B4_vec_vsplth,
+ &B4_vec_vspltw,
+ &B3_vec_vspltb,
+ &B_vec_vspltish,
+ &B_vec_vspltisw,
+ &B_vec_vspltisb,
+ &B_vec_splat_u16,
+ &B_vec_splat_u32,
+ &B_vec_splat_u8,
+ &B1_vec_vsrh,
+ &B1_vec_vsrw,
+ &B1_vec_vsrb,
+ &B2_vec_vsrh,
+ &B2_vec_vsrw,
+ &B2_vec_vsrb,
+ &B1_vec_vsrah,
+ &B1_vec_vsraw,
+ &B1_vec_vsrab,
+ &B2_vec_vsrah,
+ &B2_vec_vsraw,
+ &B2_vec_vsrab,
+ &B1_vec_vsr,
+ &B2_vec_vsr,
+ &B3_vec_vsr,
+ &B4_vec_vsr,
+ &B5_vec_vsr,
+ &B6_vec_vsr,
+ &B7_vec_vsr,
+ &B8_vec_vsr,
+ &B9_vec_vsr,
+ &B10_vec_vsr,
+ &B11_vec_vsr,
+ &B12_vec_vsr,
+ &B13_vec_vsr,
+ &B14_vec_vsr,
+ &B15_vec_vsr,
+ &B16_vec_vsr,
+ &B17_vec_vsr,
+ &B18_vec_vsr,
+ &B19_vec_vsr,
+ &B20_vec_vsr,
+ &B21_vec_vsr,
+ &B22_vec_vsr,
+ &B23_vec_vsr,
+ &B24_vec_vsr,
+ &B25_vec_vsr,
+ &B26_vec_vsr,
+ &B27_vec_vsr,
+ &B28_vec_vsr,
+ &B29_vec_vsr,
+ &B30_vec_vsr,
+ &B1_vec_vsro,
+ &B2_vec_vsro,
+ &B3_vec_vsro,
+ &B4_vec_vsro,
+ &B5_vec_vsro,
+ &B6_vec_vsro,
+ &B7_vec_vsro,
+ &B8_vec_vsro,
+ &B9_vec_vsro,
+ &B10_vec_vsro,
+ &B11_vec_vsro,
+ &B12_vec_vsro,
+ &B13_vec_vsro,
+ &B14_vec_vsro,
+ &B15_vec_vsro,
+ &B16_vec_vsro,
+ &B1_vec_stvx,
+ &B2_vec_stvx,
+ &B3_vec_stvx,
+ &B4_vec_stvx,
+ &B5_vec_stvx,
+ &B6_vec_stvx,
+ &B7_vec_stvx,
+ &B8_vec_stvx,
+ &B9_vec_stvx,
+ &B10_vec_stvx,
+ &B11_vec_stvx,
+ &B12_vec_stvx,
+ &B13_vec_stvx,
+ &B14_vec_stvx,
+ &B15_vec_stvx,
+ &B16_vec_stvx,
+ &B17_vec_stvx,
+ &B18_vec_stvx,
+ &B19_vec_stvx,
+ &B20_vec_stvx,
+ &B21_vec_stvx,
+ &B22_vec_stvx,
+ &B23_vec_stvx,
+ &B24_vec_stvx,
+ &B25_vec_stvx,
+ &B26_vec_stvx,
+ &B27_vec_stvx,
+ &B28_vec_stvx,
+ &B29_vec_stvx,
+ &B30_vec_stvx,
+ &B1_vec_stvebx,
+ &B2_vec_stvebx,
+ &B1_vec_stvewx,
+ &B2_vec_stvewx,
+ &B3_vec_stvewx,
+ &B4_vec_stvewx,
+ &B3_vec_stvebx,
+ &B4_vec_stvebx,
+ &B5_vec_stvewx,
+ &B1_vec_stvehx,
+ &B2_vec_stvehx,
+ &B3_vec_stvehx,
+ &B6_vec_stvewx,
+ &B7_vec_stvewx,
+ &B5_vec_stvebx,
+ &B4_vec_stvehx,
+ &B8_vec_stvewx,
+ &B9_vec_stvewx,
+ &B6_vec_stvebx,
+ &B1_vec_stvxl,
+ &B2_vec_stvxl,
+ &B3_vec_stvxl,
+ &B4_vec_stvxl,
+ &B5_vec_stvxl,
+ &B6_vec_stvxl,
+ &B7_vec_stvxl,
+ &B8_vec_stvxl,
+ &B9_vec_stvxl,
+ &B10_vec_stvxl,
+ &B11_vec_stvxl,
+ &B12_vec_stvxl,
+ &B13_vec_stvxl,
+ &B14_vec_stvxl,
+ &B15_vec_stvxl,
+ &B16_vec_stvxl,
+ &B17_vec_stvxl,
+ &B18_vec_stvxl,
+ &B19_vec_stvxl,
+ &B20_vec_stvxl,
+ &B21_vec_stvxl,
+ &B22_vec_stvxl,
+ &B23_vec_stvxl,
+ &B24_vec_stvxl,
+ &B25_vec_stvxl,
+ &B26_vec_stvxl,
+ &B27_vec_stvxl,
+ &B28_vec_stvxl,
+ &B29_vec_stvxl,
+ &B30_vec_stvxl,
+ &B1_vec_vsubuhm,
+ &B2_vec_vsubuhm,
+ &B1_vec_vsubuwm,
+ &B2_vec_vsubuwm,
+ &B1_vec_vsububm,
+ &B2_vec_vsububm,
+ &B_vec_vsubfp,
+ &B3_vec_vsubuhm,
+ &B4_vec_vsubuhm,
+ &B3_vec_vsubuwm,
+ &B4_vec_vsubuwm,
+ &B3_vec_vsububm,
+ &B4_vec_vsububm,
+ &B5_vec_vsubuhm,
+ &B6_vec_vsubuhm,
+ &B5_vec_vsubuwm,
+ &B6_vec_vsubuwm,
+ &B5_vec_vsububm,
+ &B6_vec_vsububm,
+ &B_vec_vsubcuw,
+ &B1_vec_vsubshs,
+ &B1_vec_vsubuhs,
+ &B1_vec_vsubsws,
+ &B1_vec_vsubuws,
+ &B1_vec_vsubsbs,
+ &B1_vec_vsububs,
+ &B2_vec_vsubshs,
+ &B3_vec_vsubshs,
+ &B2_vec_vsubsws,
+ &B3_vec_vsubsws,
+ &B2_vec_vsubsbs,
+ &B3_vec_vsubsbs,
+ &B2_vec_vsubuhs,
+ &B3_vec_vsubuhs,
+ &B2_vec_vsubuws,
+ &B3_vec_vsubuws,
+ &B2_vec_vsububs,
+ &B3_vec_vsububs,
+ &B_vec_vsum2sws,
+ &B_vec_vsum4shs,
+ &B_vec_vsum4sbs,
+ &B_vec_vsum4ubs,
+ &B_vec_vsumsws,
+ &B_vec_vrfiz,
+ &B1_vec_unpack2sh,
+ &B2_vec_unpack2sh,
+ &B1_vec_unpack2sl,
+ &B2_vec_unpack2sl,
+ &B1_vec_unpack2uh,
+ &B2_vec_unpack2uh,
+ &B1_vec_unpack2ul,
+ &B2_vec_unpack2ul,
+ &B1_vec_vupkhsh,
+ &B1_vec_vupkhsb,
+ &B_vec_vupkhpx,
+ &B2_vec_vupkhsh,
+ &B2_vec_vupkhsb,
+ &B1_vec_vupklsh,
+ &B1_vec_vupklsb,
+ &B_vec_vupklpx,
+ &B2_vec_vupklsh,
+ &B2_vec_vupklsb,
+ &B1_vec_vxor,
+ &B2_vec_vxor,
+ &B3_vec_vxor,
+ &B4_vec_vxor,
+ &B5_vec_vxor,
+ &B6_vec_vxor,
+ &B7_vec_vxor,
+ &B8_vec_vxor,
+ &B9_vec_vxor,
+ &B10_vec_vxor,
+ &B11_vec_vxor,
+ &B12_vec_vxor,
+ &B13_vec_vxor,
+ &B14_vec_vxor,
+ &B15_vec_vxor,
+ &B16_vec_vxor,
+ &B17_vec_vxor,
+ &B18_vec_vxor,
+ &B19_vec_vxor,
+ &B20_vec_vxor,
+ &B21_vec_vxor,
+ &B22_vec_vxor,
+ &B23_vec_vxor,
+ &B24_vec_vxor,
+};
+
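Builtin[] lists every descriptor once; the O_vec_* arrays that follow regroup the same descriptors by user-visible name, one candidate per overload (vec_add carries 19, vec_addc only 1). A hedged sketch of how a front end could resolve an overloaded call against such a candidate table, continuing the hypothetical field names from the sketch above; types_match is likewise an assumed helper, not an identifier from this patch.

extern int types_match (const struct type *, const struct type *);

/* Return the first candidate in CANDS (N_CANDS entries, e.g. O_vec_add
   with 19) whose argument types match ACTUAL, or NULL if none does.  */
static const struct builtin_sketch *
resolve_overload (const struct builtin_sketch *const *cands, int n_cands,
                  const struct type *actual[], int n_actual)
{
  int i, j;

  for (i = 0; i < n_cands; i++)
    {
      const struct builtin_sketch *b = cands[i];

      if (b->n_args != n_actual)
        continue;
      for (j = 0; j < n_actual; j++)
        if (!types_match (b->args[j], actual[j]))
          break;
      if (j == n_actual)
        return b;                       /* first matching overload wins */
    }
  return NULL;                          /* no viable candidate */
}

Whether the real lookup is first-match or ranked is not visible in this hunk; the sketch only illustrates why each overload carries its full argument-type vector alongside the shared instruction name.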
+static const struct builtin *const O_vec_abs[4] = {
+ &B1_vec_abs,
+ &B2_vec_abs,
+ &B3_vec_abs,
+ &B4_vec_abs,
+};
+static const struct builtin *const O_vec_abss[3] = {
+ &B1_vec_abss,
+ &B2_vec_abss,
+ &B3_vec_abss,
+};
+static const struct builtin *const O_vec_add[19] = {
+ &B1_vec_vadduhm,
+ &B2_vec_vadduhm,
+ &B1_vec_vadduwm,
+ &B2_vec_vadduwm,
+ &B1_vec_vaddubm,
+ &B2_vec_vaddubm,
+ &B_vec_vaddfp,
+ &B3_vec_vadduhm,
+ &B4_vec_vadduhm,
+ &B3_vec_vadduwm,
+ &B4_vec_vadduwm,
+ &B3_vec_vaddubm,
+ &B4_vec_vaddubm,
+ &B5_vec_vadduhm,
+ &B6_vec_vadduhm,
+ &B5_vec_vadduwm,
+ &B6_vec_vadduwm,
+ &B5_vec_vaddubm,
+ &B6_vec_vaddubm,
+};
+static const struct builtin *const O_vec_addc[1] = {
+ &B_vec_vaddcuw,
+};
+static const struct builtin *const O_vec_adds[18] = {
+ &B1_vec_vaddshs,
+ &B1_vec_vadduhs,
+ &B1_vec_vaddsws,
+ &B1_vec_vadduws,
+ &B1_vec_vaddsbs,
+ &B1_vec_vaddubs,
+ &B2_vec_vaddshs,
+ &B3_vec_vaddshs,
+ &B2_vec_vaddsws,
+ &B3_vec_vaddsws,
+ &B2_vec_vaddsbs,
+ &B3_vec_vaddsbs,
+ &B2_vec_vadduhs,
+ &B3_vec_vadduhs,
+ &B2_vec_vadduws,
+ &B3_vec_vadduws,
+ &B2_vec_vaddubs,
+ &B3_vec_vaddubs,
+};
+static const struct builtin *const O_vec_all_eq[23] = {
+ &B1_vec_all_eq,
+ &B2_vec_all_eq,
+ &B3_vec_all_eq,
+ &B4_vec_all_eq,
+ &B5_vec_all_eq,
+ &B6_vec_all_eq,
+ &B7_vec_all_eq,
+ &B8_vec_all_eq,
+ &B9_vec_all_eq,
+ &B10_vec_all_eq,
+ &B11_vec_all_eq,
+ &B12_vec_all_eq,
+ &B13_vec_all_eq,
+ &B14_vec_all_eq,
+ &B15_vec_all_eq,
+ &B16_vec_all_eq,
+ &B17_vec_all_eq,
+ &B18_vec_all_eq,
+ &B19_vec_all_eq,
+ &B20_vec_all_eq,
+ &B21_vec_all_eq,
+ &B22_vec_all_eq,
+ &B23_vec_all_eq,
+};
+static const struct builtin *const O_vec_all_ge[19] = {
+ &B1_vec_all_ge,
+ &B2_vec_all_ge,
+ &B3_vec_all_ge,
+ &B4_vec_all_ge,
+ &B5_vec_all_ge,
+ &B6_vec_all_ge,
+ &B7_vec_all_ge,
+ &B8_vec_all_ge,
+ &B9_vec_all_ge,
+ &B10_vec_all_ge,
+ &B11_vec_all_ge,
+ &B12_vec_all_ge,
+ &B13_vec_all_ge,
+ &B14_vec_all_ge,
+ &B15_vec_all_ge,
+ &B16_vec_all_ge,
+ &B17_vec_all_ge,
+ &B18_vec_all_ge,
+ &B19_vec_all_ge,
+};
+static const struct builtin *const O_vec_all_gt[19] = {
+ &B1_vec_all_gt,
+ &B2_vec_all_gt,
+ &B3_vec_all_gt,
+ &B4_vec_all_gt,
+ &B5_vec_all_gt,
+ &B6_vec_all_gt,
+ &B7_vec_all_gt,
+ &B8_vec_all_gt,
+ &B9_vec_all_gt,
+ &B10_vec_all_gt,
+ &B11_vec_all_gt,
+ &B12_vec_all_gt,
+ &B13_vec_all_gt,
+ &B14_vec_all_gt,
+ &B15_vec_all_gt,
+ &B16_vec_all_gt,
+ &B17_vec_all_gt,
+ &B18_vec_all_gt,
+ &B19_vec_all_gt,
+};
+static const struct builtin *const O_vec_all_in[1] = {
+ &B_vec_all_in,
+};
+static const struct builtin *const O_vec_all_le[19] = {
+ &B1_vec_all_le,
+ &B2_vec_all_le,
+ &B3_vec_all_le,
+ &B4_vec_all_le,
+ &B5_vec_all_le,
+ &B6_vec_all_le,
+ &B7_vec_all_le,
+ &B8_vec_all_le,
+ &B9_vec_all_le,
+ &B10_vec_all_le,
+ &B11_vec_all_le,
+ &B12_vec_all_le,
+ &B13_vec_all_le,
+ &B14_vec_all_le,
+ &B15_vec_all_le,
+ &B16_vec_all_le,
+ &B17_vec_all_le,
+ &B18_vec_all_le,
+ &B19_vec_all_le,
+};
+static const struct builtin *const O_vec_all_lt[19] = {
+ &B1_vec_all_lt,
+ &B2_vec_all_lt,
+ &B3_vec_all_lt,
+ &B4_vec_all_lt,
+ &B5_vec_all_lt,
+ &B6_vec_all_lt,
+ &B7_vec_all_lt,
+ &B8_vec_all_lt,
+ &B9_vec_all_lt,
+ &B10_vec_all_lt,
+ &B11_vec_all_lt,
+ &B12_vec_all_lt,
+ &B13_vec_all_lt,
+ &B14_vec_all_lt,
+ &B15_vec_all_lt,
+ &B16_vec_all_lt,
+ &B17_vec_all_lt,
+ &B18_vec_all_lt,
+ &B19_vec_all_lt,
+};
+static const struct builtin *const O_vec_all_nan[1] = {
+ &B_vec_all_nan,
+};
+static const struct builtin *const O_vec_all_ne[23] = {
+ &B1_vec_all_ne,
+ &B2_vec_all_ne,
+ &B3_vec_all_ne,
+ &B4_vec_all_ne,
+ &B5_vec_all_ne,
+ &B6_vec_all_ne,
+ &B7_vec_all_ne,
+ &B8_vec_all_ne,
+ &B9_vec_all_ne,
+ &B10_vec_all_ne,
+ &B11_vec_all_ne,
+ &B12_vec_all_ne,
+ &B13_vec_all_ne,
+ &B14_vec_all_ne,
+ &B15_vec_all_ne,
+ &B16_vec_all_ne,
+ &B17_vec_all_ne,
+ &B18_vec_all_ne,
+ &B19_vec_all_ne,
+ &B20_vec_all_ne,
+ &B21_vec_all_ne,
+ &B22_vec_all_ne,
+ &B23_vec_all_ne,
+};
+static const struct builtin *const O_vec_all_nge[1] = {
+ &B_vec_all_nge,
+};
+static const struct builtin *const O_vec_all_ngt[1] = {
+ &B_vec_all_ngt,
+};
+static const struct builtin *const O_vec_all_nle[1] = {
+ &B_vec_all_nle,
+};
+static const struct builtin *const O_vec_all_nlt[1] = {
+ &B_vec_all_nlt,
+};
+static const struct builtin *const O_vec_all_numeric[1] = {
+ &B_vec_all_numeric,
+};
+static const struct builtin *const O_vec_and[24] = {
+ &B1_vec_vand,
+ &B2_vec_vand,
+ &B3_vec_vand,
+ &B4_vec_vand,
+ &B5_vec_vand,
+ &B6_vec_vand,
+ &B7_vec_vand,
+ &B8_vec_vand,
+ &B9_vec_vand,
+ &B10_vec_vand,
+ &B11_vec_vand,
+ &B12_vec_vand,
+ &B13_vec_vand,
+ &B14_vec_vand,
+ &B15_vec_vand,
+ &B16_vec_vand,
+ &B17_vec_vand,
+ &B18_vec_vand,
+ &B19_vec_vand,
+ &B20_vec_vand,
+ &B21_vec_vand,
+ &B22_vec_vand,
+ &B23_vec_vand,
+ &B24_vec_vand,
+};
+static const struct builtin *const O_vec_andc[24] = {
+ &B1_vec_vandc,
+ &B2_vec_vandc,
+ &B3_vec_vandc,
+ &B4_vec_vandc,
+ &B5_vec_vandc,
+ &B6_vec_vandc,
+ &B7_vec_vandc,
+ &B8_vec_vandc,
+ &B9_vec_vandc,
+ &B10_vec_vandc,
+ &B11_vec_vandc,
+ &B12_vec_vandc,
+ &B13_vec_vandc,
+ &B14_vec_vandc,
+ &B15_vec_vandc,
+ &B16_vec_vandc,
+ &B17_vec_vandc,
+ &B18_vec_vandc,
+ &B19_vec_vandc,
+ &B20_vec_vandc,
+ &B21_vec_vandc,
+ &B22_vec_vandc,
+ &B23_vec_vandc,
+ &B24_vec_vandc,
+};
+static const struct builtin *const O_vec_any_eq[23] = {
+ &B1_vec_any_eq,
+ &B2_vec_any_eq,
+ &B3_vec_any_eq,
+ &B4_vec_any_eq,
+ &B5_vec_any_eq,
+ &B6_vec_any_eq,
+ &B7_vec_any_eq,
+ &B8_vec_any_eq,
+ &B9_vec_any_eq,
+ &B10_vec_any_eq,
+ &B11_vec_any_eq,
+ &B12_vec_any_eq,
+ &B13_vec_any_eq,
+ &B14_vec_any_eq,
+ &B15_vec_any_eq,
+ &B16_vec_any_eq,
+ &B17_vec_any_eq,
+ &B18_vec_any_eq,
+ &B19_vec_any_eq,
+ &B20_vec_any_eq,
+ &B21_vec_any_eq,
+ &B22_vec_any_eq,
+ &B23_vec_any_eq,
+};
+static const struct builtin *const O_vec_any_ge[19] = {
+ &B1_vec_any_ge,
+ &B2_vec_any_ge,
+ &B3_vec_any_ge,
+ &B4_vec_any_ge,
+ &B5_vec_any_ge,
+ &B6_vec_any_ge,
+ &B7_vec_any_ge,
+ &B8_vec_any_ge,
+ &B9_vec_any_ge,
+ &B10_vec_any_ge,
+ &B11_vec_any_ge,
+ &B12_vec_any_ge,
+ &B13_vec_any_ge,
+ &B14_vec_any_ge,
+ &B15_vec_any_ge,
+ &B16_vec_any_ge,
+ &B17_vec_any_ge,
+ &B18_vec_any_ge,
+ &B19_vec_any_ge,
+};
+static const struct builtin *const O_vec_any_gt[19] = {
+ &B1_vec_any_gt,
+ &B2_vec_any_gt,
+ &B3_vec_any_gt,
+ &B4_vec_any_gt,
+ &B5_vec_any_gt,
+ &B6_vec_any_gt,
+ &B7_vec_any_gt,
+ &B8_vec_any_gt,
+ &B9_vec_any_gt,
+ &B10_vec_any_gt,
+ &B11_vec_any_gt,
+ &B12_vec_any_gt,
+ &B13_vec_any_gt,
+ &B14_vec_any_gt,
+ &B15_vec_any_gt,
+ &B16_vec_any_gt,
+ &B17_vec_any_gt,
+ &B18_vec_any_gt,
+ &B19_vec_any_gt,
+};
+static const struct builtin *const O_vec_any_le[19] = {
+ &B1_vec_any_le,
+ &B2_vec_any_le,
+ &B3_vec_any_le,
+ &B4_vec_any_le,
+ &B5_vec_any_le,
+ &B6_vec_any_le,
+ &B7_vec_any_le,
+ &B8_vec_any_le,
+ &B9_vec_any_le,
+ &B10_vec_any_le,
+ &B11_vec_any_le,
+ &B12_vec_any_le,
+ &B13_vec_any_le,
+ &B14_vec_any_le,
+ &B15_vec_any_le,
+ &B16_vec_any_le,
+ &B17_vec_any_le,
+ &B18_vec_any_le,
+ &B19_vec_any_le,
+};
+static const struct builtin *const O_vec_any_lt[19] = {
+ &B1_vec_any_lt,
+ &B2_vec_any_lt,
+ &B3_vec_any_lt,
+ &B4_vec_any_lt,
+ &B5_vec_any_lt,
+ &B6_vec_any_lt,
+ &B7_vec_any_lt,
+ &B8_vec_any_lt,
+ &B9_vec_any_lt,
+ &B10_vec_any_lt,
+ &B11_vec_any_lt,
+ &B12_vec_any_lt,
+ &B13_vec_any_lt,
+ &B14_vec_any_lt,
+ &B15_vec_any_lt,
+ &B16_vec_any_lt,
+ &B17_vec_any_lt,
+ &B18_vec_any_lt,
+ &B19_vec_any_lt,
+};
+static const struct builtin *const O_vec_any_nan[1] = {
+ &B_vec_any_nan,
+};
+static const struct builtin *const O_vec_any_ne[23] = {
+ &B1_vec_any_ne,
+ &B2_vec_any_ne,
+ &B3_vec_any_ne,
+ &B4_vec_any_ne,
+ &B5_vec_any_ne,
+ &B6_vec_any_ne,
+ &B7_vec_any_ne,
+ &B8_vec_any_ne,
+ &B9_vec_any_ne,
+ &B10_vec_any_ne,
+ &B11_vec_any_ne,
+ &B12_vec_any_ne,
+ &B13_vec_any_ne,
+ &B14_vec_any_ne,
+ &B15_vec_any_ne,
+ &B16_vec_any_ne,
+ &B17_vec_any_ne,
+ &B18_vec_any_ne,
+ &B19_vec_any_ne,
+ &B20_vec_any_ne,
+ &B21_vec_any_ne,
+ &B22_vec_any_ne,
+ &B23_vec_any_ne,
+};
+static const struct builtin *const O_vec_any_nge[1] = {
+ &B_vec_any_nge,
+};
+static const struct builtin *const O_vec_any_ngt[1] = {
+ &B_vec_any_ngt,
+};
+static const struct builtin *const O_vec_any_nle[1] = {
+ &B_vec_any_nle,
+};
+static const struct builtin *const O_vec_any_nlt[1] = {
+ &B_vec_any_nlt,
+};
+static const struct builtin *const O_vec_any_numeric[1] = {
+ &B_vec_any_numeric,
+};
+static const struct builtin *const O_vec_any_out[1] = {
+ &B_vec_any_out,
+};
+static const struct builtin *const O_vec_avg[6] = {
+ &B_vec_vavgsh,
+ &B_vec_vavgsw,
+ &B_vec_vavgsb,
+ &B_vec_vavguh,
+ &B_vec_vavguw,
+ &B_vec_vavgub,
+};
+static const struct builtin *const O_vec_ceil[1] = {
+ &B_vec_vrfip,
+};
+static const struct builtin *const O_vec_cmpb[1] = {
+ &B_vec_vcmpbfp,
+};
+static const struct builtin *const O_vec_cmpeq[7] = {
+ &B_vec_vcmpeqfp,
+ &B1_vec_vcmpequh,
+ &B1_vec_vcmpequw,
+ &B1_vec_vcmpequb,
+ &B2_vec_vcmpequh,
+ &B2_vec_vcmpequw,
+ &B2_vec_vcmpequb,
+};
+static const struct builtin *const O_vec_cmpge[1] = {
+ &B_vec_vcmpgefp,
+};
+static const struct builtin *const O_vec_cmpgt[7] = {
+ &B_vec_vcmpgtfp,
+ &B_vec_vcmpgtsh,
+ &B_vec_vcmpgtsw,
+ &B_vec_vcmpgtsb,
+ &B_vec_vcmpgtuh,
+ &B_vec_vcmpgtuw,
+ &B_vec_vcmpgtub,
+};
+static const struct builtin *const O_vec_cmple[1] = {
+ &B_vec_cmple,
+};
+static const struct builtin *const O_vec_cmplt[7] = {
+ &B1_vec_cmplt,
+ &B2_vec_cmplt,
+ &B3_vec_cmplt,
+ &B4_vec_cmplt,
+ &B5_vec_cmplt,
+ &B6_vec_cmplt,
+ &B7_vec_cmplt,
+};
+static const struct builtin *const O_vec_ctf[2] = {
+ &B_vec_vcfsx,
+ &B_vec_vcfux,
+};
+static const struct builtin *const O_vec_cts[1] = {
+ &B_vec_vctsxs,
+};
+static const struct builtin *const O_vec_ctu[1] = {
+ &B_vec_vctuxs,
+};
+static const struct builtin *const O_vec_dss[1] = {
+ &B_vec_dss,
+};
+static const struct builtin *const O_vec_dssall[1] = {
+ &B_vec_dssall,
+};
+static const struct builtin *const O_vec_dst[20] = {
+ &B1_vec_dst,
+ &B2_vec_dst,
+ &B3_vec_dst,
+ &B4_vec_dst,
+ &B5_vec_dst,
+ &B6_vec_dst,
+ &B7_vec_dst,
+ &B8_vec_dst,
+ &B9_vec_dst,
+ &B10_vec_dst,
+ &B11_vec_dst,
+ &B12_vec_dst,
+ &B13_vec_dst,
+ &B14_vec_dst,
+ &B15_vec_dst,
+ &B16_vec_dst,
+ &B17_vec_dst,
+ &B18_vec_dst,
+ &B19_vec_dst,
+ &B20_vec_dst,
+};
+static const struct builtin *const O_vec_dstst[20] = {
+ &B1_vec_dstst,
+ &B2_vec_dstst,
+ &B3_vec_dstst,
+ &B4_vec_dstst,
+ &B5_vec_dstst,
+ &B6_vec_dstst,
+ &B7_vec_dstst,
+ &B8_vec_dstst,
+ &B9_vec_dstst,
+ &B10_vec_dstst,
+ &B11_vec_dstst,
+ &B12_vec_dstst,
+ &B13_vec_dstst,
+ &B14_vec_dstst,
+ &B15_vec_dstst,
+ &B16_vec_dstst,
+ &B17_vec_dstst,
+ &B18_vec_dstst,
+ &B19_vec_dstst,
+ &B20_vec_dstst,
+};
+static const struct builtin *const O_vec_dststt[20] = {
+ &B1_vec_dststt,
+ &B2_vec_dststt,
+ &B3_vec_dststt,
+ &B4_vec_dststt,
+ &B5_vec_dststt,
+ &B6_vec_dststt,
+ &B7_vec_dststt,
+ &B8_vec_dststt,
+ &B9_vec_dststt,
+ &B10_vec_dststt,
+ &B11_vec_dststt,
+ &B12_vec_dststt,
+ &B13_vec_dststt,
+ &B14_vec_dststt,
+ &B15_vec_dststt,
+ &B16_vec_dststt,
+ &B17_vec_dststt,
+ &B18_vec_dststt,
+ &B19_vec_dststt,
+ &B20_vec_dststt,
+};
+static const struct builtin *const O_vec_dstt[20] = {
+ &B1_vec_dstt,
+ &B2_vec_dstt,
+ &B3_vec_dstt,
+ &B4_vec_dstt,
+ &B5_vec_dstt,
+ &B6_vec_dstt,
+ &B7_vec_dstt,
+ &B8_vec_dstt,
+ &B9_vec_dstt,
+ &B10_vec_dstt,
+ &B11_vec_dstt,
+ &B12_vec_dstt,
+ &B13_vec_dstt,
+ &B14_vec_dstt,
+ &B15_vec_dstt,
+ &B16_vec_dstt,
+ &B17_vec_dstt,
+ &B18_vec_dstt,
+ &B19_vec_dstt,
+ &B20_vec_dstt,
+};
+static const struct builtin *const O_vec_expte[1] = {
+ &B_vec_vexptefp,
+};
+static const struct builtin *const O_vec_floor[1] = {
+ &B_vec_vrfim,
+};
+static const struct builtin *const O_vec_ld[20] = {
+ &B1_vec_lvx,
+ &B2_vec_lvx,
+ &B3_vec_lvx,
+ &B4_vec_lvx,
+ &B5_vec_lvx,
+ &B6_vec_lvx,
+ &B7_vec_lvx,
+ &B8_vec_lvx,
+ &B9_vec_lvx,
+ &B10_vec_lvx,
+ &B11_vec_lvx,
+ &B12_vec_lvx,
+ &B13_vec_lvx,
+ &B14_vec_lvx,
+ &B15_vec_lvx,
+ &B16_vec_lvx,
+ &B17_vec_lvx,
+ &B18_vec_lvx,
+ &B19_vec_lvx,
+ &B20_vec_lvx,
+};
+static const struct builtin *const O_vec_lde[9] = {
+ &B1_vec_lvewx,
+ &B2_vec_lvewx,
+ &B3_vec_lvewx,
+ &B1_vec_lvehx,
+ &B1_vec_lvebx,
+ &B2_vec_lvebx,
+ &B4_vec_lvewx,
+ &B5_vec_lvewx,
+ &B2_vec_lvehx,
+};
+static const struct builtin *const O_vec_ldl[20] = {
+ &B1_vec_lvxl,
+ &B2_vec_lvxl,
+ &B3_vec_lvxl,
+ &B4_vec_lvxl,
+ &B5_vec_lvxl,
+ &B6_vec_lvxl,
+ &B7_vec_lvxl,
+ &B8_vec_lvxl,
+ &B9_vec_lvxl,
+ &B10_vec_lvxl,
+ &B11_vec_lvxl,
+ &B12_vec_lvxl,
+ &B13_vec_lvxl,
+ &B14_vec_lvxl,
+ &B15_vec_lvxl,
+ &B16_vec_lvxl,
+ &B17_vec_lvxl,
+ &B18_vec_lvxl,
+ &B19_vec_lvxl,
+ &B20_vec_lvxl,
+};
+static const struct builtin *const O_vec_loge[1] = {
+ &B_vec_vlogefp,
+};
+static const struct builtin *const O_vec_lvebx[2] = {
+ &B1_vec_lvebx,
+ &B2_vec_lvebx,
+};
+static const struct builtin *const O_vec_lvehx[2] = {
+ &B1_vec_lvehx,
+ &B2_vec_lvehx,
+};
+static const struct builtin *const O_vec_lvewx[5] = {
+ &B1_vec_lvewx,
+ &B2_vec_lvewx,
+ &B3_vec_lvewx,
+ &B4_vec_lvewx,
+ &B5_vec_lvewx,
+};
+static const struct builtin *const O_vec_lvsl[9] = {
+ &B1_vec_lvsl,
+ &B2_vec_lvsl,
+ &B3_vec_lvsl,
+ &B4_vec_lvsl,
+ &B5_vec_lvsl,
+ &B6_vec_lvsl,
+ &B7_vec_lvsl,
+ &B8_vec_lvsl,
+ &B9_vec_lvsl,
+};
+static const struct builtin *const O_vec_lvsr[9] = {
+ &B1_vec_lvsr,
+ &B2_vec_lvsr,
+ &B3_vec_lvsr,
+ &B4_vec_lvsr,
+ &B5_vec_lvsr,
+ &B6_vec_lvsr,
+ &B7_vec_lvsr,
+ &B8_vec_lvsr,
+ &B9_vec_lvsr,
+};
+static const struct builtin *const O_vec_lvx[20] = {
+ &B1_vec_lvx,
+ &B2_vec_lvx,
+ &B3_vec_lvx,
+ &B4_vec_lvx,
+ &B5_vec_lvx,
+ &B6_vec_lvx,
+ &B7_vec_lvx,
+ &B8_vec_lvx,
+ &B9_vec_lvx,
+ &B10_vec_lvx,
+ &B11_vec_lvx,
+ &B12_vec_lvx,
+ &B13_vec_lvx,
+ &B14_vec_lvx,
+ &B15_vec_lvx,
+ &B16_vec_lvx,
+ &B17_vec_lvx,
+ &B18_vec_lvx,
+ &B19_vec_lvx,
+ &B20_vec_lvx,
+};
+static const struct builtin *const O_vec_lvxl[20] = {
+ &B1_vec_lvxl,
+ &B2_vec_lvxl,
+ &B3_vec_lvxl,
+ &B4_vec_lvxl,
+ &B5_vec_lvxl,
+ &B6_vec_lvxl,
+ &B7_vec_lvxl,
+ &B8_vec_lvxl,
+ &B9_vec_lvxl,
+ &B10_vec_lvxl,
+ &B11_vec_lvxl,
+ &B12_vec_lvxl,
+ &B13_vec_lvxl,
+ &B14_vec_lvxl,
+ &B15_vec_lvxl,
+ &B16_vec_lvxl,
+ &B17_vec_lvxl,
+ &B18_vec_lvxl,
+ &B19_vec_lvxl,
+ &B20_vec_lvxl,
+};
+static const struct builtin *const O_vec_madd[1] = {
+ &B_vec_vmaddfp,
+};
+static const struct builtin *const O_vec_madds[1] = {
+ &B_vec_vmhaddshs,
+};
+static const struct builtin *const O_vec_max[19] = {
+ &B1_vec_vmaxsh,
+ &B1_vec_vmaxuh,
+ &B1_vec_vmaxsw,
+ &B1_vec_vmaxuw,
+ &B1_vec_vmaxsb,
+ &B1_vec_vmaxub,
+ &B_vec_vmaxfp,
+ &B2_vec_vmaxsh,
+ &B3_vec_vmaxsh,
+ &B2_vec_vmaxsw,
+ &B3_vec_vmaxsw,
+ &B2_vec_vmaxsb,
+ &B3_vec_vmaxsb,
+ &B2_vec_vmaxuh,
+ &B3_vec_vmaxuh,
+ &B2_vec_vmaxuw,
+ &B3_vec_vmaxuw,
+ &B2_vec_vmaxub,
+ &B3_vec_vmaxub,
+};
+static const struct builtin *const O_vec_mergeh[11] = {
+ &B1_vec_vmrghh,
+ &B1_vec_vmrghw,
+ &B1_vec_vmrghb,
+ &B2_vec_vmrghw,
+ &B2_vec_vmrghh,
+ &B3_vec_vmrghh,
+ &B3_vec_vmrghw,
+ &B2_vec_vmrghb,
+ &B4_vec_vmrghh,
+ &B4_vec_vmrghw,
+ &B3_vec_vmrghb,
+};
+static const struct builtin *const O_vec_mergel[11] = {
+ &B1_vec_vmrglh,
+ &B1_vec_vmrglw,
+ &B1_vec_vmrglb,
+ &B2_vec_vmrglw,
+ &B2_vec_vmrglh,
+ &B3_vec_vmrglh,
+ &B3_vec_vmrglw,
+ &B2_vec_vmrglb,
+ &B4_vec_vmrglh,
+ &B4_vec_vmrglw,
+ &B3_vec_vmrglb,
+};
+static const struct builtin *const O_vec_mfvscr[1] = {
+ &B_vec_mfvscr,
+};
+static const struct builtin *const O_vec_min[19] = {
+ &B1_vec_vminsh,
+ &B1_vec_vminuh,
+ &B1_vec_vminsw,
+ &B1_vec_vminuw,
+ &B1_vec_vminsb,
+ &B1_vec_vminub,
+ &B_vec_vminfp,
+ &B2_vec_vminsh,
+ &B3_vec_vminsh,
+ &B2_vec_vminsw,
+ &B3_vec_vminsw,
+ &B2_vec_vminsb,
+ &B3_vec_vminsb,
+ &B2_vec_vminuh,
+ &B3_vec_vminuh,
+ &B2_vec_vminuw,
+ &B3_vec_vminuw,
+ &B2_vec_vminub,
+ &B3_vec_vminub,
+};
+static const struct builtin *const O_vec_mladd[4] = {
+ &B1_vec_vmladduhm,
+ &B2_vec_vmladduhm,
+ &B3_vec_vmladduhm,
+ &B4_vec_vmladduhm,
+};
+static const struct builtin *const O_vec_mradds[1] = {
+ &B_vec_vmhraddshs,
+};
+static const struct builtin *const O_vec_msum[4] = {
+ &B_vec_vmsumshm,
+ &B_vec_vmsummbm,
+ &B_vec_vmsumuhm,
+ &B_vec_vmsumubm,
+};
+static const struct builtin *const O_vec_msums[2] = {
+ &B_vec_vmsumshs,
+ &B_vec_vmsumuhs,
+};
+static const struct builtin *const O_vec_mtvscr[10] = {
+ &B1_vec_mtvscr,
+ &B2_vec_mtvscr,
+ &B3_vec_mtvscr,
+ &B4_vec_mtvscr,
+ &B5_vec_mtvscr,
+ &B6_vec_mtvscr,
+ &B7_vec_mtvscr,
+ &B8_vec_mtvscr,
+ &B9_vec_mtvscr,
+ &B10_vec_mtvscr,
+};
+static const struct builtin *const O_vec_mule[4] = {
+ &B_vec_vmulesh,
+ &B_vec_vmulesb,
+ &B_vec_vmuleuh,
+ &B_vec_vmuleub,
+};
+static const struct builtin *const O_vec_mulo[4] = {
+ &B_vec_vmulosh,
+ &B_vec_vmulosb,
+ &B_vec_vmulouh,
+ &B_vec_vmuloub,
+};
+static const struct builtin *const O_vec_nmsub[1] = {
+ &B_vec_vnmsubfp,
+};
+static const struct builtin *const O_vec_nor[10] = {
+ &B1_vec_vnor,
+ &B2_vec_vnor,
+ &B3_vec_vnor,
+ &B4_vec_vnor,
+ &B5_vec_vnor,
+ &B6_vec_vnor,
+ &B7_vec_vnor,
+ &B8_vec_vnor,
+ &B9_vec_vnor,
+ &B10_vec_vnor,
+};
+static const struct builtin *const O_vec_or[24] = {
+ &B1_vec_vor,
+ &B2_vec_vor,
+ &B3_vec_vor,
+ &B4_vec_vor,
+ &B5_vec_vor,
+ &B6_vec_vor,
+ &B7_vec_vor,
+ &B8_vec_vor,
+ &B9_vec_vor,
+ &B10_vec_vor,
+ &B11_vec_vor,
+ &B12_vec_vor,
+ &B13_vec_vor,
+ &B14_vec_vor,
+ &B15_vec_vor,
+ &B16_vec_vor,
+ &B17_vec_vor,
+ &B18_vec_vor,
+ &B19_vec_vor,
+ &B20_vec_vor,
+ &B21_vec_vor,
+ &B22_vec_vor,
+ &B23_vec_vor,
+ &B24_vec_vor,
+};
+static const struct builtin *const O_vec_pack[6] = {
+ &B1_vec_vpkuhum,
+ &B1_vec_vpkuwum,
+ &B2_vec_vpkuhum,
+ &B2_vec_vpkuwum,
+ &B3_vec_vpkuhum,
+ &B3_vec_vpkuwum,
+};
+static const struct builtin *const O_vec_packpx[1] = {
+ &B_vec_vpkpx,
+};
+static const struct builtin *const O_vec_packs[4] = {
+ &B_vec_vpkshss,
+ &B_vec_vpkswss,
+ &B_vec_vpkuhus,
+ &B_vec_vpkuwus,
+};
+static const struct builtin *const O_vec_packsu[4] = {
+ &B_vec_vpkshus,
+ &B_vec_vpkswus,
+ &B_vec_vpkuhus,
+ &B_vec_vpkuwus,
+};
+static const struct builtin *const O_vec_perm[11] = {
+ &B1_vec_vperm,
+ &B2_vec_vperm,
+ &B3_vec_vperm,
+ &B4_vec_vperm,
+ &B5_vec_vperm,
+ &B6_vec_vperm,
+ &B7_vec_vperm,
+ &B8_vec_vperm,
+ &B9_vec_vperm,
+ &B10_vec_vperm,
+ &B11_vec_vperm,
+};
+static const struct builtin *const O_vec_re[1] = {
+ &B_vec_vrefp,
+};
+static const struct builtin *const O_vec_rl[6] = {
+ &B1_vec_vrlh,
+ &B1_vec_vrlw,
+ &B1_vec_vrlb,
+ &B2_vec_vrlh,
+ &B2_vec_vrlw,
+ &B2_vec_vrlb,
+};
+static const struct builtin *const O_vec_round[1] = {
+ &B_vec_vrfin,
+};
+static const struct builtin *const O_vec_rsqrte[1] = {
+ &B_vec_vrsqrtefp,
+};
+static const struct builtin *const O_vec_sel[20] = {
+ &B1_vec_vsel,
+ &B2_vec_vsel,
+ &B3_vec_vsel,
+ &B4_vec_vsel,
+ &B5_vec_vsel,
+ &B6_vec_vsel,
+ &B7_vec_vsel,
+ &B8_vec_vsel,
+ &B9_vec_vsel,
+ &B10_vec_vsel,
+ &B11_vec_vsel,
+ &B12_vec_vsel,
+ &B13_vec_vsel,
+ &B14_vec_vsel,
+ &B15_vec_vsel,
+ &B16_vec_vsel,
+ &B17_vec_vsel,
+ &B18_vec_vsel,
+ &B19_vec_vsel,
+ &B20_vec_vsel,
+};
+static const struct builtin *const O_vec_sl[6] = {
+ &B1_vec_vslh,
+ &B1_vec_vslw,
+ &B1_vec_vslb,
+ &B2_vec_vslh,
+ &B2_vec_vslw,
+ &B2_vec_vslb,
+};
+static const struct builtin *const O_vec_sld[11] = {
+ &B1_vec_vsldoi,
+ &B2_vec_vsldoi,
+ &B3_vec_vsldoi,
+ &B4_vec_vsldoi,
+ &B5_vec_vsldoi,
+ &B6_vec_vsldoi,
+ &B7_vec_vsldoi,
+ &B8_vec_vsldoi,
+ &B9_vec_vsldoi,
+ &B10_vec_vsldoi,
+ &B11_vec_vsldoi,
+};
+static const struct builtin *const O_vec_sll[30] = {
+ &B1_vec_vsl,
+ &B2_vec_vsl,
+ &B3_vec_vsl,
+ &B4_vec_vsl,
+ &B5_vec_vsl,
+ &B6_vec_vsl,
+ &B7_vec_vsl,
+ &B8_vec_vsl,
+ &B9_vec_vsl,
+ &B10_vec_vsl,
+ &B11_vec_vsl,
+ &B12_vec_vsl,
+ &B13_vec_vsl,
+ &B14_vec_vsl,
+ &B15_vec_vsl,
+ &B16_vec_vsl,
+ &B17_vec_vsl,
+ &B18_vec_vsl,
+ &B19_vec_vsl,
+ &B20_vec_vsl,
+ &B21_vec_vsl,
+ &B22_vec_vsl,
+ &B23_vec_vsl,
+ &B24_vec_vsl,
+ &B25_vec_vsl,
+ &B26_vec_vsl,
+ &B27_vec_vsl,
+ &B28_vec_vsl,
+ &B29_vec_vsl,
+ &B30_vec_vsl,
+};
+static const struct builtin *const O_vec_slo[16] = {
+ &B1_vec_vslo,
+ &B2_vec_vslo,
+ &B3_vec_vslo,
+ &B4_vec_vslo,
+ &B5_vec_vslo,
+ &B6_vec_vslo,
+ &B7_vec_vslo,
+ &B8_vec_vslo,
+ &B9_vec_vslo,
+ &B10_vec_vslo,
+ &B11_vec_vslo,
+ &B12_vec_vslo,
+ &B13_vec_vslo,
+ &B14_vec_vslo,
+ &B15_vec_vslo,
+ &B16_vec_vslo,
+};
+static const struct builtin *const O_vec_splat[11] = {
+ &B1_vec_vsplth,
+ &B1_vec_vspltw,
+ &B1_vec_vspltb,
+ &B2_vec_vspltw,
+ &B2_vec_vsplth,
+ &B3_vec_vsplth,
+ &B3_vec_vspltw,
+ &B2_vec_vspltb,
+ &B4_vec_vsplth,
+ &B4_vec_vspltw,
+ &B3_vec_vspltb,
+};
+static const struct builtin *const O_vec_splat_s16[1] = {
+ &B_vec_vspltish,
+};
+static const struct builtin *const O_vec_splat_s32[1] = {
+ &B_vec_vspltisw,
+};
+static const struct builtin *const O_vec_splat_s8[1] = {
+ &B_vec_vspltisb,
+};
+static const struct builtin *const O_vec_splat_u16[1] = {
+ &B_vec_splat_u16,
+};
+static const struct builtin *const O_vec_splat_u32[1] = {
+ &B_vec_splat_u32,
+};
+static const struct builtin *const O_vec_splat_u8[1] = {
+ &B_vec_splat_u8,
+};
+static const struct builtin *const O_vec_sr[6] = {
+ &B1_vec_vsrh,
+ &B1_vec_vsrw,
+ &B1_vec_vsrb,
+ &B2_vec_vsrh,
+ &B2_vec_vsrw,
+ &B2_vec_vsrb,
+};
+static const struct builtin *const O_vec_sra[6] = {
+ &B1_vec_vsrah,
+ &B1_vec_vsraw,
+ &B1_vec_vsrab,
+ &B2_vec_vsrah,
+ &B2_vec_vsraw,
+ &B2_vec_vsrab,
+};
+static const struct builtin *const O_vec_srl[30] = {
+ &B1_vec_vsr,
+ &B2_vec_vsr,
+ &B3_vec_vsr,
+ &B4_vec_vsr,
+ &B5_vec_vsr,
+ &B6_vec_vsr,
+ &B7_vec_vsr,
+ &B8_vec_vsr,
+ &B9_vec_vsr,
+ &B10_vec_vsr,
+ &B11_vec_vsr,
+ &B12_vec_vsr,
+ &B13_vec_vsr,
+ &B14_vec_vsr,
+ &B15_vec_vsr,
+ &B16_vec_vsr,
+ &B17_vec_vsr,
+ &B18_vec_vsr,
+ &B19_vec_vsr,
+ &B20_vec_vsr,
+ &B21_vec_vsr,
+ &B22_vec_vsr,
+ &B23_vec_vsr,
+ &B24_vec_vsr,
+ &B25_vec_vsr,
+ &B26_vec_vsr,
+ &B27_vec_vsr,
+ &B28_vec_vsr,
+ &B29_vec_vsr,
+ &B30_vec_vsr,
+};
+static const struct builtin *const O_vec_sro[16] = {
+ &B1_vec_vsro,
+ &B2_vec_vsro,
+ &B3_vec_vsro,
+ &B4_vec_vsro,
+ &B5_vec_vsro,
+ &B6_vec_vsro,
+ &B7_vec_vsro,
+ &B8_vec_vsro,
+ &B9_vec_vsro,
+ &B10_vec_vsro,
+ &B11_vec_vsro,
+ &B12_vec_vsro,
+ &B13_vec_vsro,
+ &B14_vec_vsro,
+ &B15_vec_vsro,
+ &B16_vec_vsro,
+};
+static const struct builtin *const O_vec_st[30] = {
+ &B1_vec_stvx,
+ &B2_vec_stvx,
+ &B3_vec_stvx,
+ &B4_vec_stvx,
+ &B5_vec_stvx,
+ &B6_vec_stvx,
+ &B7_vec_stvx,
+ &B8_vec_stvx,
+ &B9_vec_stvx,
+ &B10_vec_stvx,
+ &B11_vec_stvx,
+ &B12_vec_stvx,
+ &B13_vec_stvx,
+ &B14_vec_stvx,
+ &B15_vec_stvx,
+ &B16_vec_stvx,
+ &B17_vec_stvx,
+ &B18_vec_stvx,
+ &B19_vec_stvx,
+ &B20_vec_stvx,
+ &B21_vec_stvx,
+ &B22_vec_stvx,
+ &B23_vec_stvx,
+ &B24_vec_stvx,
+ &B25_vec_stvx,
+ &B26_vec_stvx,
+ &B27_vec_stvx,
+ &B28_vec_stvx,
+ &B29_vec_stvx,
+ &B30_vec_stvx,
+};
+static const struct builtin *const O_vec_ste[19] = {
+ &B1_vec_stvebx,
+ &B2_vec_stvebx,
+ &B1_vec_stvewx,
+ &B2_vec_stvewx,
+ &B3_vec_stvewx,
+ &B4_vec_stvewx,
+ &B3_vec_stvebx,
+ &B4_vec_stvebx,
+ &B5_vec_stvewx,
+ &B1_vec_stvehx,
+ &B2_vec_stvehx,
+ &B3_vec_stvehx,
+ &B6_vec_stvewx,
+ &B7_vec_stvewx,
+ &B5_vec_stvebx,
+ &B4_vec_stvehx,
+ &B8_vec_stvewx,
+ &B9_vec_stvewx,
+ &B6_vec_stvebx,
+};
+static const struct builtin *const O_vec_stl[30] = {
+ &B1_vec_stvxl,
+ &B2_vec_stvxl,
+ &B3_vec_stvxl,
+ &B4_vec_stvxl,
+ &B5_vec_stvxl,
+ &B6_vec_stvxl,
+ &B7_vec_stvxl,
+ &B8_vec_stvxl,
+ &B9_vec_stvxl,
+ &B10_vec_stvxl,
+ &B11_vec_stvxl,
+ &B12_vec_stvxl,
+ &B13_vec_stvxl,
+ &B14_vec_stvxl,
+ &B15_vec_stvxl,
+ &B16_vec_stvxl,
+ &B17_vec_stvxl,
+ &B18_vec_stvxl,
+ &B19_vec_stvxl,
+ &B20_vec_stvxl,
+ &B21_vec_stvxl,
+ &B22_vec_stvxl,
+ &B23_vec_stvxl,
+ &B24_vec_stvxl,
+ &B25_vec_stvxl,
+ &B26_vec_stvxl,
+ &B27_vec_stvxl,
+ &B28_vec_stvxl,
+ &B29_vec_stvxl,
+ &B30_vec_stvxl,
+};
+static const struct builtin *const O_vec_stvebx[6] = {
+ &B1_vec_stvebx,
+ &B2_vec_stvebx,
+ &B3_vec_stvebx,
+ &B4_vec_stvebx,
+ &B5_vec_stvebx,
+ &B6_vec_stvebx,
+};
+static const struct builtin *const O_vec_stvehx[4] = {
+ &B1_vec_stvehx,
+ &B2_vec_stvehx,
+ &B3_vec_stvehx,
+ &B4_vec_stvehx,
+};
+static const struct builtin *const O_vec_stvewx[9] = {
+ &B1_vec_stvewx,
+ &B2_vec_stvewx,
+ &B3_vec_stvewx,
+ &B4_vec_stvewx,
+ &B5_vec_stvewx,
+ &B6_vec_stvewx,
+ &B7_vec_stvewx,
+ &B8_vec_stvewx,
+ &B9_vec_stvewx,
+};
+static const struct builtin *const O_vec_stvx[30] = {
+ &B1_vec_stvx,
+ &B2_vec_stvx,
+ &B3_vec_stvx,
+ &B4_vec_stvx,
+ &B5_vec_stvx,
+ &B6_vec_stvx,
+ &B7_vec_stvx,
+ &B8_vec_stvx,
+ &B9_vec_stvx,
+ &B10_vec_stvx,
+ &B11_vec_stvx,
+ &B12_vec_stvx,
+ &B13_vec_stvx,
+ &B14_vec_stvx,
+ &B15_vec_stvx,
+ &B16_vec_stvx,
+ &B17_vec_stvx,
+ &B18_vec_stvx,
+ &B19_vec_stvx,
+ &B20_vec_stvx,
+ &B21_vec_stvx,
+ &B22_vec_stvx,
+ &B23_vec_stvx,
+ &B24_vec_stvx,
+ &B25_vec_stvx,
+ &B26_vec_stvx,
+ &B27_vec_stvx,
+ &B28_vec_stvx,
+ &B29_vec_stvx,
+ &B30_vec_stvx,
+};
+static const struct builtin *const O_vec_stvxl[30] = {
+ &B1_vec_stvxl,
+ &B2_vec_stvxl,
+ &B3_vec_stvxl,
+ &B4_vec_stvxl,
+ &B5_vec_stvxl,
+ &B6_vec_stvxl,
+ &B7_vec_stvxl,
+ &B8_vec_stvxl,
+ &B9_vec_stvxl,
+ &B10_vec_stvxl,
+ &B11_vec_stvxl,
+ &B12_vec_stvxl,
+ &B13_vec_stvxl,
+ &B14_vec_stvxl,
+ &B15_vec_stvxl,
+ &B16_vec_stvxl,
+ &B17_vec_stvxl,
+ &B18_vec_stvxl,
+ &B19_vec_stvxl,
+ &B20_vec_stvxl,
+ &B21_vec_stvxl,
+ &B22_vec_stvxl,
+ &B23_vec_stvxl,
+ &B24_vec_stvxl,
+ &B25_vec_stvxl,
+ &B26_vec_stvxl,
+ &B27_vec_stvxl,
+ &B28_vec_stvxl,
+ &B29_vec_stvxl,
+ &B30_vec_stvxl,
+};
+static const struct builtin *const O_vec_sub[19] = {
+ &B1_vec_vsubuhm,
+ &B2_vec_vsubuhm,
+ &B1_vec_vsubuwm,
+ &B2_vec_vsubuwm,
+ &B1_vec_vsububm,
+ &B2_vec_vsububm,
+ &B_vec_vsubfp,
+ &B3_vec_vsubuhm,
+ &B4_vec_vsubuhm,
+ &B3_vec_vsubuwm,
+ &B4_vec_vsubuwm,
+ &B3_vec_vsububm,
+ &B4_vec_vsububm,
+ &B5_vec_vsubuhm,
+ &B6_vec_vsubuhm,
+ &B5_vec_vsubuwm,
+ &B6_vec_vsubuwm,
+ &B5_vec_vsububm,
+ &B6_vec_vsububm,
+};
+static const struct builtin *const O_vec_subc[1] = {
+ &B_vec_vsubcuw,
+};
+static const struct builtin *const O_vec_subs[18] = {
+ &B1_vec_vsubshs,
+ &B1_vec_vsubuhs,
+ &B1_vec_vsubsws,
+ &B1_vec_vsubuws,
+ &B1_vec_vsubsbs,
+ &B1_vec_vsububs,
+ &B2_vec_vsubshs,
+ &B3_vec_vsubshs,
+ &B2_vec_vsubsws,
+ &B3_vec_vsubsws,
+ &B2_vec_vsubsbs,
+ &B3_vec_vsubsbs,
+ &B2_vec_vsubuhs,
+ &B3_vec_vsubuhs,
+ &B2_vec_vsubuws,
+ &B3_vec_vsubuws,
+ &B2_vec_vsububs,
+ &B3_vec_vsububs,
+};
+static const struct builtin *const O_vec_sum2s[1] = {
+ &B_vec_vsum2sws,
+};
+static const struct builtin *const O_vec_sum4s[3] = {
+ &B_vec_vsum4shs,
+ &B_vec_vsum4sbs,
+ &B_vec_vsum4ubs,
+};
+static const struct builtin *const O_vec_sums[1] = {
+ &B_vec_vsumsws,
+};
+static const struct builtin *const O_vec_trunc[1] = {
+ &B_vec_vrfiz,
+};
+static const struct builtin *const O_vec_unpack2sh[2] = {
+ &B1_vec_unpack2sh,
+ &B2_vec_unpack2sh,
+};
+static const struct builtin *const O_vec_unpack2sl[2] = {
+ &B1_vec_unpack2sl,
+ &B2_vec_unpack2sl,
+};
+static const struct builtin *const O_vec_unpack2uh[2] = {
+ &B1_vec_unpack2uh,
+ &B2_vec_unpack2uh,
+};
+static const struct builtin *const O_vec_unpack2ul[2] = {
+ &B1_vec_unpack2ul,
+ &B2_vec_unpack2ul,
+};
+static const struct builtin *const O_vec_unpackh[5] = {
+ &B1_vec_vupkhsh,
+ &B1_vec_vupkhsb,
+ &B_vec_vupkhpx,
+ &B2_vec_vupkhsh,
+ &B2_vec_vupkhsb,
+};
+static const struct builtin *const O_vec_unpackl[5] = {
+ &B1_vec_vupklsh,
+ &B1_vec_vupklsb,
+ &B_vec_vupklpx,
+ &B2_vec_vupklsh,
+ &B2_vec_vupklsb,
+};
+static const struct builtin *const O_vec_vaddcuw[1] = {
+ &B_vec_vaddcuw,
+};
+static const struct builtin *const O_vec_vaddfp[1] = {
+ &B_vec_vaddfp,
+};
+static const struct builtin *const O_vec_vaddsbs[3] = {
+ &B1_vec_vaddsbs,
+ &B2_vec_vaddsbs,
+ &B3_vec_vaddsbs,
+};
+static const struct builtin *const O_vec_vaddshs[3] = {
+ &B1_vec_vaddshs,
+ &B2_vec_vaddshs,
+ &B3_vec_vaddshs,
+};
+static const struct builtin *const O_vec_vaddsws[3] = {
+ &B1_vec_vaddsws,
+ &B2_vec_vaddsws,
+ &B3_vec_vaddsws,
+};
+static const struct builtin *const O_vec_vaddubm[6] = {
+ &B1_vec_vaddubm,
+ &B2_vec_vaddubm,
+ &B3_vec_vaddubm,
+ &B4_vec_vaddubm,
+ &B5_vec_vaddubm,
+ &B6_vec_vaddubm,
+};
+static const struct builtin *const O_vec_vaddubs[3] = {
+ &B1_vec_vaddubs,
+ &B2_vec_vaddubs,
+ &B3_vec_vaddubs,
+};
+static const struct builtin *const O_vec_vadduhm[6] = {
+ &B1_vec_vadduhm,
+ &B2_vec_vadduhm,
+ &B3_vec_vadduhm,
+ &B4_vec_vadduhm,
+ &B5_vec_vadduhm,
+ &B6_vec_vadduhm,
+};
+static const struct builtin *const O_vec_vadduhs[3] = {
+ &B1_vec_vadduhs,
+ &B2_vec_vadduhs,
+ &B3_vec_vadduhs,
+};
+static const struct builtin *const O_vec_vadduwm[6] = {
+ &B1_vec_vadduwm,
+ &B2_vec_vadduwm,
+ &B3_vec_vadduwm,
+ &B4_vec_vadduwm,
+ &B5_vec_vadduwm,
+ &B6_vec_vadduwm,
+};
+static const struct builtin *const O_vec_vadduws[3] = {
+ &B1_vec_vadduws,
+ &B2_vec_vadduws,
+ &B3_vec_vadduws,
+};
+static const struct builtin *const O_vec_vand[24] = {
+ &B1_vec_vand,
+ &B2_vec_vand,
+ &B3_vec_vand,
+ &B4_vec_vand,
+ &B5_vec_vand,
+ &B6_vec_vand,
+ &B7_vec_vand,
+ &B8_vec_vand,
+ &B9_vec_vand,
+ &B10_vec_vand,
+ &B11_vec_vand,
+ &B12_vec_vand,
+ &B13_vec_vand,
+ &B14_vec_vand,
+ &B15_vec_vand,
+ &B16_vec_vand,
+ &B17_vec_vand,
+ &B18_vec_vand,
+ &B19_vec_vand,
+ &B20_vec_vand,
+ &B21_vec_vand,
+ &B22_vec_vand,
+ &B23_vec_vand,
+ &B24_vec_vand,
+};
+static const struct builtin *const O_vec_vandc[24] = {
+ &B1_vec_vandc,
+ &B2_vec_vandc,
+ &B3_vec_vandc,
+ &B4_vec_vandc,
+ &B5_vec_vandc,
+ &B6_vec_vandc,
+ &B7_vec_vandc,
+ &B8_vec_vandc,
+ &B9_vec_vandc,
+ &B10_vec_vandc,
+ &B11_vec_vandc,
+ &B12_vec_vandc,
+ &B13_vec_vandc,
+ &B14_vec_vandc,
+ &B15_vec_vandc,
+ &B16_vec_vandc,
+ &B17_vec_vandc,
+ &B18_vec_vandc,
+ &B19_vec_vandc,
+ &B20_vec_vandc,
+ &B21_vec_vandc,
+ &B22_vec_vandc,
+ &B23_vec_vandc,
+ &B24_vec_vandc,
+};
+static const struct builtin *const O_vec_vavgsb[1] = {
+ &B_vec_vavgsb,
+};
+static const struct builtin *const O_vec_vavgsh[1] = {
+ &B_vec_vavgsh,
+};
+static const struct builtin *const O_vec_vavgsw[1] = {
+ &B_vec_vavgsw,
+};
+static const struct builtin *const O_vec_vavgub[1] = {
+ &B_vec_vavgub,
+};
+static const struct builtin *const O_vec_vavguh[1] = {
+ &B_vec_vavguh,
+};
+static const struct builtin *const O_vec_vavguw[1] = {
+ &B_vec_vavguw,
+};
+static const struct builtin *const O_vec_vcfsx[1] = {
+ &B_vec_vcfsx,
+};
+static const struct builtin *const O_vec_vcfux[1] = {
+ &B_vec_vcfux,
+};
+static const struct builtin *const O_vec_vcmpbfp[1] = {
+ &B_vec_vcmpbfp,
+};
+static const struct builtin *const O_vec_vcmpeqfp[1] = {
+ &B_vec_vcmpeqfp,
+};
+static const struct builtin *const O_vec_vcmpequb[2] = {
+ &B1_vec_vcmpequb,
+ &B2_vec_vcmpequb,
+};
+static const struct builtin *const O_vec_vcmpequh[2] = {
+ &B1_vec_vcmpequh,
+ &B2_vec_vcmpequh,
+};
+static const struct builtin *const O_vec_vcmpequw[2] = {
+ &B1_vec_vcmpequw,
+ &B2_vec_vcmpequw,
+};
+static const struct builtin *const O_vec_vcmpgefp[1] = {
+ &B_vec_vcmpgefp,
+};
+static const struct builtin *const O_vec_vcmpgtfp[1] = {
+ &B_vec_vcmpgtfp,
+};
+static const struct builtin *const O_vec_vcmpgtsb[1] = {
+ &B_vec_vcmpgtsb,
+};
+static const struct builtin *const O_vec_vcmpgtsh[1] = {
+ &B_vec_vcmpgtsh,
+};
+static const struct builtin *const O_vec_vcmpgtsw[1] = {
+ &B_vec_vcmpgtsw,
+};
+static const struct builtin *const O_vec_vcmpgtub[1] = {
+ &B_vec_vcmpgtub,
+};
+static const struct builtin *const O_vec_vcmpgtuh[1] = {
+ &B_vec_vcmpgtuh,
+};
+static const struct builtin *const O_vec_vcmpgtuw[1] = {
+ &B_vec_vcmpgtuw,
+};
+static const struct builtin *const O_vec_vctsxs[1] = {
+ &B_vec_vctsxs,
+};
+static const struct builtin *const O_vec_vctuxs[1] = {
+ &B_vec_vctuxs,
+};
+static const struct builtin *const O_vec_vexptefp[1] = {
+ &B_vec_vexptefp,
+};
+static const struct builtin *const O_vec_vlogefp[1] = {
+ &B_vec_vlogefp,
+};
+static const struct builtin *const O_vec_vmaddfp[1] = {
+ &B_vec_vmaddfp,
+};
+static const struct builtin *const O_vec_vmaxfp[1] = {
+ &B_vec_vmaxfp,
+};
+static const struct builtin *const O_vec_vmaxsb[3] = {
+ &B1_vec_vmaxsb,
+ &B2_vec_vmaxsb,
+ &B3_vec_vmaxsb,
+};
+static const struct builtin *const O_vec_vmaxsh[3] = {
+ &B1_vec_vmaxsh,
+ &B2_vec_vmaxsh,
+ &B3_vec_vmaxsh,
+};
+static const struct builtin *const O_vec_vmaxsw[3] = {
+ &B1_vec_vmaxsw,
+ &B2_vec_vmaxsw,
+ &B3_vec_vmaxsw,
+};
+static const struct builtin *const O_vec_vmaxub[3] = {
+ &B1_vec_vmaxub,
+ &B2_vec_vmaxub,
+ &B3_vec_vmaxub,
+};
+static const struct builtin *const O_vec_vmaxuh[3] = {
+ &B1_vec_vmaxuh,
+ &B2_vec_vmaxuh,
+ &B3_vec_vmaxuh,
+};
+static const struct builtin *const O_vec_vmaxuw[3] = {
+ &B1_vec_vmaxuw,
+ &B2_vec_vmaxuw,
+ &B3_vec_vmaxuw,
+};
+static const struct builtin *const O_vec_vmhaddshs[1] = {
+ &B_vec_vmhaddshs,
+};
+static const struct builtin *const O_vec_vmhraddshs[1] = {
+ &B_vec_vmhraddshs,
+};
+static const struct builtin *const O_vec_vminfp[1] = {
+ &B_vec_vminfp,
+};
+static const struct builtin *const O_vec_vminsb[3] = {
+ &B1_vec_vminsb,
+ &B2_vec_vminsb,
+ &B3_vec_vminsb,
+};
+static const struct builtin *const O_vec_vminsh[3] = {
+ &B1_vec_vminsh,
+ &B2_vec_vminsh,
+ &B3_vec_vminsh,
+};
+static const struct builtin *const O_vec_vminsw[3] = {
+ &B1_vec_vminsw,
+ &B2_vec_vminsw,
+ &B3_vec_vminsw,
+};
+static const struct builtin *const O_vec_vminub[3] = {
+ &B1_vec_vminub,
+ &B2_vec_vminub,
+ &B3_vec_vminub,
+};
+static const struct builtin *const O_vec_vminuh[3] = {
+ &B1_vec_vminuh,
+ &B2_vec_vminuh,
+ &B3_vec_vminuh,
+};
+static const struct builtin *const O_vec_vminuw[3] = {
+ &B1_vec_vminuw,
+ &B2_vec_vminuw,
+ &B3_vec_vminuw,
+};
+static const struct builtin *const O_vec_vmladduhm[4] = {
+ &B1_vec_vmladduhm,
+ &B2_vec_vmladduhm,
+ &B3_vec_vmladduhm,
+ &B4_vec_vmladduhm,
+};
+static const struct builtin *const O_vec_vmrghb[3] = {
+ &B1_vec_vmrghb,
+ &B2_vec_vmrghb,
+ &B3_vec_vmrghb,
+};
+static const struct builtin *const O_vec_vmrghh[4] = {
+ &B1_vec_vmrghh,
+ &B2_vec_vmrghh,
+ &B3_vec_vmrghh,
+ &B4_vec_vmrghh,
+};
+static const struct builtin *const O_vec_vmrghw[4] = {
+ &B1_vec_vmrghw,
+ &B2_vec_vmrghw,
+ &B3_vec_vmrghw,
+ &B4_vec_vmrghw,
+};
+static const struct builtin *const O_vec_vmrglb[3] = {
+ &B1_vec_vmrglb,
+ &B2_vec_vmrglb,
+ &B3_vec_vmrglb,
+};
+static const struct builtin *const O_vec_vmrglh[4] = {
+ &B1_vec_vmrglh,
+ &B2_vec_vmrglh,
+ &B3_vec_vmrglh,
+ &B4_vec_vmrglh,
+};
+static const struct builtin *const O_vec_vmrglw[4] = {
+ &B1_vec_vmrglw,
+ &B2_vec_vmrglw,
+ &B3_vec_vmrglw,
+ &B4_vec_vmrglw,
+};
+static const struct builtin *const O_vec_vmsummbm[1] = {
+ &B_vec_vmsummbm,
+};
+static const struct builtin *const O_vec_vmsumshm[1] = {
+ &B_vec_vmsumshm,
+};
+static const struct builtin *const O_vec_vmsumshs[1] = {
+ &B_vec_vmsumshs,
+};
+static const struct builtin *const O_vec_vmsumubm[1] = {
+ &B_vec_vmsumubm,
+};
+static const struct builtin *const O_vec_vmsumuhm[1] = {
+ &B_vec_vmsumuhm,
+};
+static const struct builtin *const O_vec_vmsumuhs[1] = {
+ &B_vec_vmsumuhs,
+};
+static const struct builtin *const O_vec_vmulesb[1] = {
+ &B_vec_vmulesb,
+};
+static const struct builtin *const O_vec_vmulesh[1] = {
+ &B_vec_vmulesh,
+};
+static const struct builtin *const O_vec_vmuleub[1] = {
+ &B_vec_vmuleub,
+};
+static const struct builtin *const O_vec_vmuleuh[1] = {
+ &B_vec_vmuleuh,
+};
+static const struct builtin *const O_vec_vmulosb[1] = {
+ &B_vec_vmulosb,
+};
+static const struct builtin *const O_vec_vmulosh[1] = {
+ &B_vec_vmulosh,
+};
+static const struct builtin *const O_vec_vmuloub[1] = {
+ &B_vec_vmuloub,
+};
+static const struct builtin *const O_vec_vmulouh[1] = {
+ &B_vec_vmulouh,
+};
+static const struct builtin *const O_vec_vnmsubfp[1] = {
+ &B_vec_vnmsubfp,
+};
+static const struct builtin *const O_vec_vnor[10] = {
+ &B1_vec_vnor,
+ &B2_vec_vnor,
+ &B3_vec_vnor,
+ &B4_vec_vnor,
+ &B5_vec_vnor,
+ &B6_vec_vnor,
+ &B7_vec_vnor,
+ &B8_vec_vnor,
+ &B9_vec_vnor,
+ &B10_vec_vnor,
+};
+static const struct builtin *const O_vec_vor[24] = {
+ &B1_vec_vor,
+ &B2_vec_vor,
+ &B3_vec_vor,
+ &B4_vec_vor,
+ &B5_vec_vor,
+ &B6_vec_vor,
+ &B7_vec_vor,
+ &B8_vec_vor,
+ &B9_vec_vor,
+ &B10_vec_vor,
+ &B11_vec_vor,
+ &B12_vec_vor,
+ &B13_vec_vor,
+ &B14_vec_vor,
+ &B15_vec_vor,
+ &B16_vec_vor,
+ &B17_vec_vor,
+ &B18_vec_vor,
+ &B19_vec_vor,
+ &B20_vec_vor,
+ &B21_vec_vor,
+ &B22_vec_vor,
+ &B23_vec_vor,
+ &B24_vec_vor,
+};
+static const struct builtin *const O_vec_vperm[11] = {
+ &B1_vec_vperm,
+ &B2_vec_vperm,
+ &B3_vec_vperm,
+ &B4_vec_vperm,
+ &B5_vec_vperm,
+ &B6_vec_vperm,
+ &B7_vec_vperm,
+ &B8_vec_vperm,
+ &B9_vec_vperm,
+ &B10_vec_vperm,
+ &B11_vec_vperm,
+};
+static const struct builtin *const O_vec_vpkpx[1] = {
+ &B_vec_vpkpx,
+};
+static const struct builtin *const O_vec_vpkshss[1] = {
+ &B_vec_vpkshss,
+};
+static const struct builtin *const O_vec_vpkshus[1] = {
+ &B_vec_vpkshus,
+};
+static const struct builtin *const O_vec_vpkswss[1] = {
+ &B_vec_vpkswss,
+};
+static const struct builtin *const O_vec_vpkswus[1] = {
+ &B_vec_vpkswus,
+};
+static const struct builtin *const O_vec_vpkuhum[3] = {
+ &B1_vec_vpkuhum,
+ &B2_vec_vpkuhum,
+ &B3_vec_vpkuhum,
+};
+static const struct builtin *const O_vec_vpkuhus[1] = {
+ &B_vec_vpkuhus,
+};
+static const struct builtin *const O_vec_vpkuwum[3] = {
+ &B1_vec_vpkuwum,
+ &B2_vec_vpkuwum,
+ &B3_vec_vpkuwum,
+};
+static const struct builtin *const O_vec_vpkuwus[1] = {
+ &B_vec_vpkuwus,
+};
+static const struct builtin *const O_vec_vrefp[1] = {
+ &B_vec_vrefp,
+};
+static const struct builtin *const O_vec_vrfim[1] = {
+ &B_vec_vrfim,
+};
+static const struct builtin *const O_vec_vrfin[1] = {
+ &B_vec_vrfin,
+};
+static const struct builtin *const O_vec_vrfip[1] = {
+ &B_vec_vrfip,
+};
+static const struct builtin *const O_vec_vrfiz[1] = {
+ &B_vec_vrfiz,
+};
+static const struct builtin *const O_vec_vrlb[2] = {
+ &B1_vec_vrlb,
+ &B2_vec_vrlb,
+};
+static const struct builtin *const O_vec_vrlh[2] = {
+ &B1_vec_vrlh,
+ &B2_vec_vrlh,
+};
+static const struct builtin *const O_vec_vrlw[2] = {
+ &B1_vec_vrlw,
+ &B2_vec_vrlw,
+};
+static const struct builtin *const O_vec_vrsqrtefp[1] = {
+ &B_vec_vrsqrtefp,
+};
+static const struct builtin *const O_vec_vsel[20] = {
+ &B1_vec_vsel,
+ &B2_vec_vsel,
+ &B3_vec_vsel,
+ &B4_vec_vsel,
+ &B5_vec_vsel,
+ &B6_vec_vsel,
+ &B7_vec_vsel,
+ &B8_vec_vsel,
+ &B9_vec_vsel,
+ &B10_vec_vsel,
+ &B11_vec_vsel,
+ &B12_vec_vsel,
+ &B13_vec_vsel,
+ &B14_vec_vsel,
+ &B15_vec_vsel,
+ &B16_vec_vsel,
+ &B17_vec_vsel,
+ &B18_vec_vsel,
+ &B19_vec_vsel,
+ &B20_vec_vsel,
+};
+static const struct builtin *const O_vec_vsl[30] = {
+ &B1_vec_vsl,
+ &B2_vec_vsl,
+ &B3_vec_vsl,
+ &B4_vec_vsl,
+ &B5_vec_vsl,
+ &B6_vec_vsl,
+ &B7_vec_vsl,
+ &B8_vec_vsl,
+ &B9_vec_vsl,
+ &B10_vec_vsl,
+ &B11_vec_vsl,
+ &B12_vec_vsl,
+ &B13_vec_vsl,
+ &B14_vec_vsl,
+ &B15_vec_vsl,
+ &B16_vec_vsl,
+ &B17_vec_vsl,
+ &B18_vec_vsl,
+ &B19_vec_vsl,
+ &B20_vec_vsl,
+ &B21_vec_vsl,
+ &B22_vec_vsl,
+ &B23_vec_vsl,
+ &B24_vec_vsl,
+ &B25_vec_vsl,
+ &B26_vec_vsl,
+ &B27_vec_vsl,
+ &B28_vec_vsl,
+ &B29_vec_vsl,
+ &B30_vec_vsl,
+};
+static const struct builtin *const O_vec_vslb[2] = {
+ &B1_vec_vslb,
+ &B2_vec_vslb,
+};
+static const struct builtin *const O_vec_vsldoi[11] = {
+ &B1_vec_vsldoi,
+ &B2_vec_vsldoi,
+ &B3_vec_vsldoi,
+ &B4_vec_vsldoi,
+ &B5_vec_vsldoi,
+ &B6_vec_vsldoi,
+ &B7_vec_vsldoi,
+ &B8_vec_vsldoi,
+ &B9_vec_vsldoi,
+ &B10_vec_vsldoi,
+ &B11_vec_vsldoi,
+};
+static const struct builtin *const O_vec_vslh[2] = {
+ &B1_vec_vslh,
+ &B2_vec_vslh,
+};
+static const struct builtin *const O_vec_vslo[16] = {
+ &B1_vec_vslo,
+ &B2_vec_vslo,
+ &B3_vec_vslo,
+ &B4_vec_vslo,
+ &B5_vec_vslo,
+ &B6_vec_vslo,
+ &B7_vec_vslo,
+ &B8_vec_vslo,
+ &B9_vec_vslo,
+ &B10_vec_vslo,
+ &B11_vec_vslo,
+ &B12_vec_vslo,
+ &B13_vec_vslo,
+ &B14_vec_vslo,
+ &B15_vec_vslo,
+ &B16_vec_vslo,
+};
+static const struct builtin *const O_vec_vslw[2] = {
+ &B1_vec_vslw,
+ &B2_vec_vslw,
+};
+static const struct builtin *const O_vec_vspltb[3] = {
+ &B1_vec_vspltb,
+ &B2_vec_vspltb,
+ &B3_vec_vspltb,
+};
+static const struct builtin *const O_vec_vsplth[4] = {
+ &B1_vec_vsplth,
+ &B2_vec_vsplth,
+ &B3_vec_vsplth,
+ &B4_vec_vsplth,
+};
+static const struct builtin *const O_vec_vspltisb[1] = {
+ &B_vec_vspltisb,
+};
+static const struct builtin *const O_vec_vspltish[1] = {
+ &B_vec_vspltish,
+};
+static const struct builtin *const O_vec_vspltisw[1] = {
+ &B_vec_vspltisw,
+};
+static const struct builtin *const O_vec_vspltw[4] = {
+ &B1_vec_vspltw,
+ &B2_vec_vspltw,
+ &B3_vec_vspltw,
+ &B4_vec_vspltw,
+};
+static const struct builtin *const O_vec_vsr[30] = {
+ &B1_vec_vsr,
+ &B2_vec_vsr,
+ &B3_vec_vsr,
+ &B4_vec_vsr,
+ &B5_vec_vsr,
+ &B6_vec_vsr,
+ &B7_vec_vsr,
+ &B8_vec_vsr,
+ &B9_vec_vsr,
+ &B10_vec_vsr,
+ &B11_vec_vsr,
+ &B12_vec_vsr,
+ &B13_vec_vsr,
+ &B14_vec_vsr,
+ &B15_vec_vsr,
+ &B16_vec_vsr,
+ &B17_vec_vsr,
+ &B18_vec_vsr,
+ &B19_vec_vsr,
+ &B20_vec_vsr,
+ &B21_vec_vsr,
+ &B22_vec_vsr,
+ &B23_vec_vsr,
+ &B24_vec_vsr,
+ &B25_vec_vsr,
+ &B26_vec_vsr,
+ &B27_vec_vsr,
+ &B28_vec_vsr,
+ &B29_vec_vsr,
+ &B30_vec_vsr,
+};
+static const struct builtin *const O_vec_vsrab[2] = {
+ &B1_vec_vsrab,
+ &B2_vec_vsrab,
+};
+static const struct builtin *const O_vec_vsrah[2] = {
+ &B1_vec_vsrah,
+ &B2_vec_vsrah,
+};
+static const struct builtin *const O_vec_vsraw[2] = {
+ &B1_vec_vsraw,
+ &B2_vec_vsraw,
+};
+static const struct builtin *const O_vec_vsrb[2] = {
+ &B1_vec_vsrb,
+ &B2_vec_vsrb,
+};
+static const struct builtin *const O_vec_vsrh[2] = {
+ &B1_vec_vsrh,
+ &B2_vec_vsrh,
+};
+static const struct builtin *const O_vec_vsro[16] = {
+ &B1_vec_vsro,
+ &B2_vec_vsro,
+ &B3_vec_vsro,
+ &B4_vec_vsro,
+ &B5_vec_vsro,
+ &B6_vec_vsro,
+ &B7_vec_vsro,
+ &B8_vec_vsro,
+ &B9_vec_vsro,
+ &B10_vec_vsro,
+ &B11_vec_vsro,
+ &B12_vec_vsro,
+ &B13_vec_vsro,
+ &B14_vec_vsro,
+ &B15_vec_vsro,
+ &B16_vec_vsro,
+};
+static const struct builtin *const O_vec_vsrw[2] = {
+ &B1_vec_vsrw,
+ &B2_vec_vsrw,
+};
+static const struct builtin *const O_vec_vsubcuw[1] = {
+ &B_vec_vsubcuw,
+};
+static const struct builtin *const O_vec_vsubfp[1] = {
+ &B_vec_vsubfp,
+};
+static const struct builtin *const O_vec_vsubsbs[3] = {
+ &B1_vec_vsubsbs,
+ &B2_vec_vsubsbs,
+ &B3_vec_vsubsbs,
+};
+static const struct builtin *const O_vec_vsubshs[3] = {
+ &B1_vec_vsubshs,
+ &B2_vec_vsubshs,
+ &B3_vec_vsubshs,
+};
+static const struct builtin *const O_vec_vsubsws[3] = {
+ &B1_vec_vsubsws,
+ &B2_vec_vsubsws,
+ &B3_vec_vsubsws,
+};
+static const struct builtin *const O_vec_vsububm[6] = {
+ &B1_vec_vsububm,
+ &B2_vec_vsububm,
+ &B3_vec_vsububm,
+ &B4_vec_vsububm,
+ &B5_vec_vsububm,
+ &B6_vec_vsububm,
+};
+static const struct builtin *const O_vec_vsububs[3] = {
+ &B1_vec_vsububs,
+ &B2_vec_vsububs,
+ &B3_vec_vsububs,
+};
+static const struct builtin *const O_vec_vsubuhm[6] = {
+ &B1_vec_vsubuhm,
+ &B2_vec_vsubuhm,
+ &B3_vec_vsubuhm,
+ &B4_vec_vsubuhm,
+ &B5_vec_vsubuhm,
+ &B6_vec_vsubuhm,
+};
+static const struct builtin *const O_vec_vsubuhs[3] = {
+ &B1_vec_vsubuhs,
+ &B2_vec_vsubuhs,
+ &B3_vec_vsubuhs,
+};
+static const struct builtin *const O_vec_vsubuwm[6] = {
+ &B1_vec_vsubuwm,
+ &B2_vec_vsubuwm,
+ &B3_vec_vsubuwm,
+ &B4_vec_vsubuwm,
+ &B5_vec_vsubuwm,
+ &B6_vec_vsubuwm,
+};
+static const struct builtin *const O_vec_vsubuws[3] = {
+ &B1_vec_vsubuws,
+ &B2_vec_vsubuws,
+ &B3_vec_vsubuws,
+};
+static const struct builtin *const O_vec_vsum2sws[1] = {
+ &B_vec_vsum2sws,
+};
+static const struct builtin *const O_vec_vsum4sbs[1] = {
+ &B_vec_vsum4sbs,
+};
+static const struct builtin *const O_vec_vsum4shs[1] = {
+ &B_vec_vsum4shs,
+};
+static const struct builtin *const O_vec_vsum4ubs[1] = {
+ &B_vec_vsum4ubs,
+};
+static const struct builtin *const O_vec_vsumsws[1] = {
+ &B_vec_vsumsws,
+};
+static const struct builtin *const O_vec_vupkhpx[1] = {
+ &B_vec_vupkhpx,
+};
+static const struct builtin *const O_vec_vupkhsb[2] = {
+ &B1_vec_vupkhsb,
+ &B2_vec_vupkhsb,
+};
+static const struct builtin *const O_vec_vupkhsh[2] = {
+ &B1_vec_vupkhsh,
+ &B2_vec_vupkhsh,
+};
+static const struct builtin *const O_vec_vupklpx[1] = {
+ &B_vec_vupklpx,
+};
+static const struct builtin *const O_vec_vupklsb[2] = {
+ &B1_vec_vupklsb,
+ &B2_vec_vupklsb,
+};
+static const struct builtin *const O_vec_vupklsh[2] = {
+ &B1_vec_vupklsh,
+ &B2_vec_vupklsh,
+};
+static const struct builtin *const O_vec_vxor[24] = {
+ &B1_vec_vxor,
+ &B2_vec_vxor,
+ &B3_vec_vxor,
+ &B4_vec_vxor,
+ &B5_vec_vxor,
+ &B6_vec_vxor,
+ &B7_vec_vxor,
+ &B8_vec_vxor,
+ &B9_vec_vxor,
+ &B10_vec_vxor,
+ &B11_vec_vxor,
+ &B12_vec_vxor,
+ &B13_vec_vxor,
+ &B14_vec_vxor,
+ &B15_vec_vxor,
+ &B16_vec_vxor,
+ &B17_vec_vxor,
+ &B18_vec_vxor,
+ &B19_vec_vxor,
+ &B20_vec_vxor,
+ &B21_vec_vxor,
+ &B22_vec_vxor,
+ &B23_vec_vxor,
+ &B24_vec_vxor,
+};
+static const struct builtin *const O_vec_xor[24] = {
+ &B1_vec_vxor,
+ &B2_vec_vxor,
+ &B3_vec_vxor,
+ &B4_vec_vxor,
+ &B5_vec_vxor,
+ &B6_vec_vxor,
+ &B7_vec_vxor,
+ &B8_vec_vxor,
+ &B9_vec_vxor,
+ &B10_vec_vxor,
+ &B11_vec_vxor,
+ &B12_vec_vxor,
+ &B13_vec_vxor,
+ &B14_vec_vxor,
+ &B15_vec_vxor,
+ &B16_vec_vxor,
+ &B17_vec_vxor,
+ &B18_vec_vxor,
+ &B19_vec_vxor,
+ &B20_vec_vxor,
+ &B21_vec_vxor,
+ &B22_vec_vxor,
+ &B23_vec_vxor,
+ &B24_vec_vxor,
+};
+
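+/* Master overload table, terminated by a NULL entry.  Each row gives
+   the overloaded intrinsic's name, the number of candidate builtins,
+   the number of arguments it takes, its candidate table above, and a
+   unique id (O_UID) identifying the overload.  */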
+const struct overloadx Overload[] = {
+ { "vec_abs", 4, 1, O_vec_abs, O_UID(0) },
+ { "vec_abss", 3, 1, O_vec_abss, O_UID(1) },
+ { "vec_add", 19, 2, O_vec_add, O_UID(2) },
+ { "vec_addc", 1, 2, O_vec_addc, O_UID(3) },
+ { "vec_adds", 18, 2, O_vec_adds, O_UID(4) },
+ { "vec_all_eq", 23, 2, O_vec_all_eq, O_UID(5) },
+ { "vec_all_ge", 19, 2, O_vec_all_ge, O_UID(6) },
+ { "vec_all_gt", 19, 2, O_vec_all_gt, O_UID(7) },
+ { "vec_all_in", 1, 2, O_vec_all_in, O_UID(8) },
+ { "vec_all_le", 19, 2, O_vec_all_le, O_UID(9) },
+ { "vec_all_lt", 19, 2, O_vec_all_lt, O_UID(10) },
+ { "vec_all_nan", 1, 1, O_vec_all_nan, O_UID(11) },
+ { "vec_all_ne", 23, 2, O_vec_all_ne, O_UID(12) },
+ { "vec_all_nge", 1, 2, O_vec_all_nge, O_UID(13) },
+ { "vec_all_ngt", 1, 2, O_vec_all_ngt, O_UID(14) },
+ { "vec_all_nle", 1, 2, O_vec_all_nle, O_UID(15) },
+ { "vec_all_nlt", 1, 2, O_vec_all_nlt, O_UID(16) },
+ { "vec_all_numeric", 1, 1, O_vec_all_numeric, O_UID(17) },
+ { "vec_and", 24, 2, O_vec_and, O_UID(18) },
+ { "vec_andc", 24, 2, O_vec_andc, O_UID(19) },
+ { "vec_any_eq", 23, 2, O_vec_any_eq, O_UID(20) },
+ { "vec_any_ge", 19, 2, O_vec_any_ge, O_UID(21) },
+ { "vec_any_gt", 19, 2, O_vec_any_gt, O_UID(22) },
+ { "vec_any_le", 19, 2, O_vec_any_le, O_UID(23) },
+ { "vec_any_lt", 19, 2, O_vec_any_lt, O_UID(24) },
+ { "vec_any_nan", 1, 1, O_vec_any_nan, O_UID(25) },
+ { "vec_any_ne", 23, 2, O_vec_any_ne, O_UID(26) },
+ { "vec_any_nge", 1, 2, O_vec_any_nge, O_UID(27) },
+ { "vec_any_ngt", 1, 2, O_vec_any_ngt, O_UID(28) },
+ { "vec_any_nle", 1, 2, O_vec_any_nle, O_UID(29) },
+ { "vec_any_nlt", 1, 2, O_vec_any_nlt, O_UID(30) },
+ { "vec_any_numeric", 1, 1, O_vec_any_numeric, O_UID(31) },
+ { "vec_any_out", 1, 2, O_vec_any_out, O_UID(32) },
+ { "vec_avg", 6, 2, O_vec_avg, O_UID(33) },
+ { "vec_ceil", 1, 1, O_vec_ceil, O_UID(34) },
+ { "vec_cmpb", 1, 2, O_vec_cmpb, O_UID(35) },
+ { "vec_cmpeq", 7, 2, O_vec_cmpeq, O_UID(36) },
+ { "vec_cmpge", 1, 2, O_vec_cmpge, O_UID(37) },
+ { "vec_cmpgt", 7, 2, O_vec_cmpgt, O_UID(38) },
+ { "vec_cmple", 1, 2, O_vec_cmple, O_UID(39) },
+ { "vec_cmplt", 7, 2, O_vec_cmplt, O_UID(40) },
+ { "vec_ctf", 2, 2, O_vec_ctf, O_UID(41) },
+ { "vec_cts", 1, 2, O_vec_cts, O_UID(42) },
+ { "vec_ctu", 1, 2, O_vec_ctu, O_UID(43) },
+ { "vec_dss", 1, 1, O_vec_dss, O_UID(44) },
+ { "vec_dssall", 1, 0, O_vec_dssall, O_UID(45) },
+ { "vec_dst", 20, 3, O_vec_dst, O_UID(46) },
+ { "vec_dstst", 20, 3, O_vec_dstst, O_UID(47) },
+ { "vec_dststt", 20, 3, O_vec_dststt, O_UID(48) },
+ { "vec_dstt", 20, 3, O_vec_dstt, O_UID(49) },
+ { "vec_expte", 1, 1, O_vec_expte, O_UID(50) },
+ { "vec_floor", 1, 1, O_vec_floor, O_UID(51) },
+ { "vec_ld", 20, 2, O_vec_ld, O_UID(52) },
+ { "vec_lde", 9, 2, O_vec_lde, O_UID(53) },
+ { "vec_ldl", 20, 2, O_vec_ldl, O_UID(54) },
+ { "vec_loge", 1, 1, O_vec_loge, O_UID(55) },
+ { "vec_lvebx", 2, 2, O_vec_lvebx, O_UID(56) },
+ { "vec_lvehx", 2, 2, O_vec_lvehx, O_UID(57) },
+ { "vec_lvewx", 5, 2, O_vec_lvewx, O_UID(58) },
+ { "vec_lvsl", 9, 2, O_vec_lvsl, O_UID(59) },
+ { "vec_lvsr", 9, 2, O_vec_lvsr, O_UID(60) },
+ { "vec_lvx", 20, 2, O_vec_lvx, O_UID(61) },
+ { "vec_lvxl", 20, 2, O_vec_lvxl, O_UID(62) },
+ { "vec_madd", 1, 3, O_vec_madd, O_UID(63) },
+ { "vec_madds", 1, 3, O_vec_madds, O_UID(64) },
+ { "vec_max", 19, 2, O_vec_max, O_UID(65) },
+ { "vec_mergeh", 11, 2, O_vec_mergeh, O_UID(66) },
+ { "vec_mergel", 11, 2, O_vec_mergel, O_UID(67) },
+ { "vec_mfvscr", 1, 0, O_vec_mfvscr, O_UID(68) },
+ { "vec_min", 19, 2, O_vec_min, O_UID(69) },
+ { "vec_mladd", 4, 3, O_vec_mladd, O_UID(70) },
+ { "vec_mradds", 1, 3, O_vec_mradds, O_UID(71) },
+ { "vec_msum", 4, 3, O_vec_msum, O_UID(72) },
+ { "vec_msums", 2, 3, O_vec_msums, O_UID(73) },
+ { "vec_mtvscr", 10, 1, O_vec_mtvscr, O_UID(74) },
+ { "vec_mule", 4, 2, O_vec_mule, O_UID(75) },
+ { "vec_mulo", 4, 2, O_vec_mulo, O_UID(76) },
+ { "vec_nmsub", 1, 3, O_vec_nmsub, O_UID(77) },
+ { "vec_nor", 10, 2, O_vec_nor, O_UID(78) },
+ { "vec_or", 24, 2, O_vec_or, O_UID(79) },
+ { "vec_pack", 6, 2, O_vec_pack, O_UID(80) },
+ { "vec_packpx", 1, 2, O_vec_packpx, O_UID(81) },
+ { "vec_packs", 4, 2, O_vec_packs, O_UID(82) },
+ { "vec_packsu", 4, 2, O_vec_packsu, O_UID(83) },
+ { "vec_perm", 11, 3, O_vec_perm, O_UID(84) },
+ { "vec_re", 1, 1, O_vec_re, O_UID(85) },
+ { "vec_rl", 6, 2, O_vec_rl, O_UID(86) },
+ { "vec_round", 1, 1, O_vec_round, O_UID(87) },
+ { "vec_rsqrte", 1, 1, O_vec_rsqrte, O_UID(88) },
+ { "vec_sel", 20, 3, O_vec_sel, O_UID(89) },
+ { "vec_sl", 6, 2, O_vec_sl, O_UID(90) },
+ { "vec_sld", 11, 3, O_vec_sld, O_UID(91) },
+ { "vec_sll", 30, 2, O_vec_sll, O_UID(92) },
+ { "vec_slo", 16, 2, O_vec_slo, O_UID(93) },
+ { "vec_splat", 11, 2, O_vec_splat, O_UID(94) },
+ { "vec_splat_s16", 1, 1, O_vec_splat_s16, O_UID(95) },
+ { "vec_splat_s32", 1, 1, O_vec_splat_s32, O_UID(96) },
+ { "vec_splat_s8", 1, 1, O_vec_splat_s8, O_UID(97) },
+ { "vec_splat_u16", 1, 1, O_vec_splat_u16, O_UID(98) },
+ { "vec_splat_u32", 1, 1, O_vec_splat_u32, O_UID(99) },
+ { "vec_splat_u8", 1, 1, O_vec_splat_u8, O_UID(100) },
+ { "vec_sr", 6, 2, O_vec_sr, O_UID(101) },
+ { "vec_sra", 6, 2, O_vec_sra, O_UID(102) },
+ { "vec_srl", 30, 2, O_vec_srl, O_UID(103) },
+ { "vec_sro", 16, 2, O_vec_sro, O_UID(104) },
+ { "vec_st", 30, 3, O_vec_st, O_UID(105) },
+ { "vec_ste", 19, 3, O_vec_ste, O_UID(106) },
+ { "vec_stl", 30, 3, O_vec_stl, O_UID(107) },
+ { "vec_stvebx", 6, 3, O_vec_stvebx, O_UID(108) },
+ { "vec_stvehx", 4, 3, O_vec_stvehx, O_UID(109) },
+ { "vec_stvewx", 9, 3, O_vec_stvewx, O_UID(110) },
+ { "vec_stvx", 30, 3, O_vec_stvx, O_UID(111) },
+ { "vec_stvxl", 30, 3, O_vec_stvxl, O_UID(112) },
+ { "vec_sub", 19, 2, O_vec_sub, O_UID(113) },
+ { "vec_subc", 1, 2, O_vec_subc, O_UID(114) },
+ { "vec_subs", 18, 2, O_vec_subs, O_UID(115) },
+ { "vec_sum2s", 1, 2, O_vec_sum2s, O_UID(116) },
+ { "vec_sum4s", 3, 2, O_vec_sum4s, O_UID(117) },
+ { "vec_sums", 1, 2, O_vec_sums, O_UID(118) },
+ { "vec_trunc", 1, 1, O_vec_trunc, O_UID(119) },
+ { "vec_unpack2sh", 2, 2, O_vec_unpack2sh, O_UID(120) },
+ { "vec_unpack2sl", 2, 2, O_vec_unpack2sl, O_UID(121) },
+ { "vec_unpack2uh", 2, 2, O_vec_unpack2uh, O_UID(122) },
+ { "vec_unpack2ul", 2, 2, O_vec_unpack2ul, O_UID(123) },
+ { "vec_unpackh", 5, 1, O_vec_unpackh, O_UID(124) },
+ { "vec_unpackl", 5, 1, O_vec_unpackl, O_UID(125) },
+ { "vec_vaddcuw", 1, 2, O_vec_vaddcuw, O_UID(126) },
+ { "vec_vaddfp", 1, 2, O_vec_vaddfp, O_UID(127) },
+ { "vec_vaddsbs", 3, 2, O_vec_vaddsbs, O_UID(128) },
+ { "vec_vaddshs", 3, 2, O_vec_vaddshs, O_UID(129) },
+ { "vec_vaddsws", 3, 2, O_vec_vaddsws, O_UID(130) },
+ { "vec_vaddubm", 6, 2, O_vec_vaddubm, O_UID(131) },
+ { "vec_vaddubs", 3, 2, O_vec_vaddubs, O_UID(132) },
+ { "vec_vadduhm", 6, 2, O_vec_vadduhm, O_UID(133) },
+ { "vec_vadduhs", 3, 2, O_vec_vadduhs, O_UID(134) },
+ { "vec_vadduwm", 6, 2, O_vec_vadduwm, O_UID(135) },
+ { "vec_vadduws", 3, 2, O_vec_vadduws, O_UID(136) },
+ { "vec_vand", 24, 2, O_vec_vand, O_UID(137) },
+ { "vec_vandc", 24, 2, O_vec_vandc, O_UID(138) },
+ { "vec_vavgsb", 1, 2, O_vec_vavgsb, O_UID(139) },
+ { "vec_vavgsh", 1, 2, O_vec_vavgsh, O_UID(140) },
+ { "vec_vavgsw", 1, 2, O_vec_vavgsw, O_UID(141) },
+ { "vec_vavgub", 1, 2, O_vec_vavgub, O_UID(142) },
+ { "vec_vavguh", 1, 2, O_vec_vavguh, O_UID(143) },
+ { "vec_vavguw", 1, 2, O_vec_vavguw, O_UID(144) },
+ { "vec_vcfsx", 1, 2, O_vec_vcfsx, O_UID(145) },
+ { "vec_vcfux", 1, 2, O_vec_vcfux, O_UID(146) },
+ { "vec_vcmpbfp", 1, 2, O_vec_vcmpbfp, O_UID(147) },
+ { "vec_vcmpeqfp", 1, 2, O_vec_vcmpeqfp, O_UID(148) },
+ { "vec_vcmpequb", 2, 2, O_vec_vcmpequb, O_UID(149) },
+ { "vec_vcmpequh", 2, 2, O_vec_vcmpequh, O_UID(150) },
+ { "vec_vcmpequw", 2, 2, O_vec_vcmpequw, O_UID(151) },
+ { "vec_vcmpgefp", 1, 2, O_vec_vcmpgefp, O_UID(152) },
+ { "vec_vcmpgtfp", 1, 2, O_vec_vcmpgtfp, O_UID(153) },
+ { "vec_vcmpgtsb", 1, 2, O_vec_vcmpgtsb, O_UID(154) },
+ { "vec_vcmpgtsh", 1, 2, O_vec_vcmpgtsh, O_UID(155) },
+ { "vec_vcmpgtsw", 1, 2, O_vec_vcmpgtsw, O_UID(156) },
+ { "vec_vcmpgtub", 1, 2, O_vec_vcmpgtub, O_UID(157) },
+ { "vec_vcmpgtuh", 1, 2, O_vec_vcmpgtuh, O_UID(158) },
+ { "vec_vcmpgtuw", 1, 2, O_vec_vcmpgtuw, O_UID(159) },
+ { "vec_vctsxs", 1, 2, O_vec_vctsxs, O_UID(160) },
+ { "vec_vctuxs", 1, 2, O_vec_vctuxs, O_UID(161) },
+ { "vec_vexptefp", 1, 1, O_vec_vexptefp, O_UID(162) },
+ { "vec_vlogefp", 1, 1, O_vec_vlogefp, O_UID(163) },
+ { "vec_vmaddfp", 1, 3, O_vec_vmaddfp, O_UID(164) },
+ { "vec_vmaxfp", 1, 2, O_vec_vmaxfp, O_UID(165) },
+ { "vec_vmaxsb", 3, 2, O_vec_vmaxsb, O_UID(166) },
+ { "vec_vmaxsh", 3, 2, O_vec_vmaxsh, O_UID(167) },
+ { "vec_vmaxsw", 3, 2, O_vec_vmaxsw, O_UID(168) },
+ { "vec_vmaxub", 3, 2, O_vec_vmaxub, O_UID(169) },
+ { "vec_vmaxuh", 3, 2, O_vec_vmaxuh, O_UID(170) },
+ { "vec_vmaxuw", 3, 2, O_vec_vmaxuw, O_UID(171) },
+ { "vec_vmhaddshs", 1, 3, O_vec_vmhaddshs, O_UID(172) },
+ { "vec_vmhraddshs", 1, 3, O_vec_vmhraddshs, O_UID(173) },
+ { "vec_vminfp", 1, 2, O_vec_vminfp, O_UID(174) },
+ { "vec_vminsb", 3, 2, O_vec_vminsb, O_UID(175) },
+ { "vec_vminsh", 3, 2, O_vec_vminsh, O_UID(176) },
+ { "vec_vminsw", 3, 2, O_vec_vminsw, O_UID(177) },
+ { "vec_vminub", 3, 2, O_vec_vminub, O_UID(178) },
+ { "vec_vminuh", 3, 2, O_vec_vminuh, O_UID(179) },
+ { "vec_vminuw", 3, 2, O_vec_vminuw, O_UID(180) },
+ { "vec_vmladduhm", 4, 3, O_vec_vmladduhm, O_UID(181) },
+ { "vec_vmrghb", 3, 2, O_vec_vmrghb, O_UID(182) },
+ { "vec_vmrghh", 4, 2, O_vec_vmrghh, O_UID(183) },
+ { "vec_vmrghw", 4, 2, O_vec_vmrghw, O_UID(184) },
+ { "vec_vmrglb", 3, 2, O_vec_vmrglb, O_UID(185) },
+ { "vec_vmrglh", 4, 2, O_vec_vmrglh, O_UID(186) },
+ { "vec_vmrglw", 4, 2, O_vec_vmrglw, O_UID(187) },
+ { "vec_vmsummbm", 1, 3, O_vec_vmsummbm, O_UID(188) },
+ { "vec_vmsumshm", 1, 3, O_vec_vmsumshm, O_UID(189) },
+ { "vec_vmsumshs", 1, 3, O_vec_vmsumshs, O_UID(190) },
+ { "vec_vmsumubm", 1, 3, O_vec_vmsumubm, O_UID(191) },
+ { "vec_vmsumuhm", 1, 3, O_vec_vmsumuhm, O_UID(192) },
+ { "vec_vmsumuhs", 1, 3, O_vec_vmsumuhs, O_UID(193) },
+ { "vec_vmulesb", 1, 2, O_vec_vmulesb, O_UID(194) },
+ { "vec_vmulesh", 1, 2, O_vec_vmulesh, O_UID(195) },
+ { "vec_vmuleub", 1, 2, O_vec_vmuleub, O_UID(196) },
+ { "vec_vmuleuh", 1, 2, O_vec_vmuleuh, O_UID(197) },
+ { "vec_vmulosb", 1, 2, O_vec_vmulosb, O_UID(198) },
+ { "vec_vmulosh", 1, 2, O_vec_vmulosh, O_UID(199) },
+ { "vec_vmuloub", 1, 2, O_vec_vmuloub, O_UID(200) },
+ { "vec_vmulouh", 1, 2, O_vec_vmulouh, O_UID(201) },
+ { "vec_vnmsubfp", 1, 3, O_vec_vnmsubfp, O_UID(202) },
+ { "vec_vnor", 10, 2, O_vec_vnor, O_UID(203) },
+ { "vec_vor", 24, 2, O_vec_vor, O_UID(204) },
+ { "vec_vperm", 11, 3, O_vec_vperm, O_UID(205) },
+ { "vec_vpkpx", 1, 2, O_vec_vpkpx, O_UID(206) },
+ { "vec_vpkshss", 1, 2, O_vec_vpkshss, O_UID(207) },
+ { "vec_vpkshus", 1, 2, O_vec_vpkshus, O_UID(208) },
+ { "vec_vpkswss", 1, 2, O_vec_vpkswss, O_UID(209) },
+ { "vec_vpkswus", 1, 2, O_vec_vpkswus, O_UID(210) },
+ { "vec_vpkuhum", 3, 2, O_vec_vpkuhum, O_UID(211) },
+ { "vec_vpkuhus", 1, 2, O_vec_vpkuhus, O_UID(212) },
+ { "vec_vpkuwum", 3, 2, O_vec_vpkuwum, O_UID(213) },
+ { "vec_vpkuwus", 1, 2, O_vec_vpkuwus, O_UID(214) },
+ { "vec_vrefp", 1, 1, O_vec_vrefp, O_UID(215) },
+ { "vec_vrfim", 1, 1, O_vec_vrfim, O_UID(216) },
+ { "vec_vrfin", 1, 1, O_vec_vrfin, O_UID(217) },
+ { "vec_vrfip", 1, 1, O_vec_vrfip, O_UID(218) },
+ { "vec_vrfiz", 1, 1, O_vec_vrfiz, O_UID(219) },
+ { "vec_vrlb", 2, 2, O_vec_vrlb, O_UID(220) },
+ { "vec_vrlh", 2, 2, O_vec_vrlh, O_UID(221) },
+ { "vec_vrlw", 2, 2, O_vec_vrlw, O_UID(222) },
+ { "vec_vrsqrtefp", 1, 1, O_vec_vrsqrtefp, O_UID(223) },
+ { "vec_vsel", 20, 3, O_vec_vsel, O_UID(224) },
+ { "vec_vsl", 30, 2, O_vec_vsl, O_UID(225) },
+ { "vec_vslb", 2, 2, O_vec_vslb, O_UID(226) },
+ { "vec_vsldoi", 11, 3, O_vec_vsldoi, O_UID(227) },
+ { "vec_vslh", 2, 2, O_vec_vslh, O_UID(228) },
+ { "vec_vslo", 16, 2, O_vec_vslo, O_UID(229) },
+ { "vec_vslw", 2, 2, O_vec_vslw, O_UID(230) },
+ { "vec_vspltb", 3, 2, O_vec_vspltb, O_UID(231) },
+ { "vec_vsplth", 4, 2, O_vec_vsplth, O_UID(232) },
+ { "vec_vspltisb", 1, 1, O_vec_vspltisb, O_UID(233) },
+ { "vec_vspltish", 1, 1, O_vec_vspltish, O_UID(234) },
+ { "vec_vspltisw", 1, 1, O_vec_vspltisw, O_UID(235) },
+ { "vec_vspltw", 4, 2, O_vec_vspltw, O_UID(236) },
+ { "vec_vsr", 30, 2, O_vec_vsr, O_UID(237) },
+ { "vec_vsrab", 2, 2, O_vec_vsrab, O_UID(238) },
+ { "vec_vsrah", 2, 2, O_vec_vsrah, O_UID(239) },
+ { "vec_vsraw", 2, 2, O_vec_vsraw, O_UID(240) },
+ { "vec_vsrb", 2, 2, O_vec_vsrb, O_UID(241) },
+ { "vec_vsrh", 2, 2, O_vec_vsrh, O_UID(242) },
+ { "vec_vsro", 16, 2, O_vec_vsro, O_UID(243) },
+ { "vec_vsrw", 2, 2, O_vec_vsrw, O_UID(244) },
+ { "vec_vsubcuw", 1, 2, O_vec_vsubcuw, O_UID(245) },
+ { "vec_vsubfp", 1, 2, O_vec_vsubfp, O_UID(246) },
+ { "vec_vsubsbs", 3, 2, O_vec_vsubsbs, O_UID(247) },
+ { "vec_vsubshs", 3, 2, O_vec_vsubshs, O_UID(248) },
+ { "vec_vsubsws", 3, 2, O_vec_vsubsws, O_UID(249) },
+ { "vec_vsububm", 6, 2, O_vec_vsububm, O_UID(250) },
+ { "vec_vsububs", 3, 2, O_vec_vsububs, O_UID(251) },
+ { "vec_vsubuhm", 6, 2, O_vec_vsubuhm, O_UID(252) },
+ { "vec_vsubuhs", 3, 2, O_vec_vsubuhs, O_UID(253) },
+ { "vec_vsubuwm", 6, 2, O_vec_vsubuwm, O_UID(254) },
+ { "vec_vsubuws", 3, 2, O_vec_vsubuws, O_UID(255) },
+ { "vec_vsum2sws", 1, 2, O_vec_vsum2sws, O_UID(256) },
+ { "vec_vsum4sbs", 1, 2, O_vec_vsum4sbs, O_UID(257) },
+ { "vec_vsum4shs", 1, 2, O_vec_vsum4shs, O_UID(258) },
+ { "vec_vsum4ubs", 1, 2, O_vec_vsum4ubs, O_UID(259) },
+ { "vec_vsumsws", 1, 2, O_vec_vsumsws, O_UID(260) },
+ { "vec_vupkhpx", 1, 1, O_vec_vupkhpx, O_UID(261) },
+ { "vec_vupkhsb", 2, 1, O_vec_vupkhsb, O_UID(262) },
+ { "vec_vupkhsh", 2, 1, O_vec_vupkhsh, O_UID(263) },
+ { "vec_vupklpx", 1, 1, O_vec_vupklpx, O_UID(264) },
+ { "vec_vupklsb", 2, 1, O_vec_vupklsb, O_UID(265) },
+ { "vec_vupklsh", 2, 1, O_vec_vupklsh, O_UID(266) },
+ { "vec_vxor", 24, 2, O_vec_vxor, O_UID(267) },
+ { "vec_xor", 24, 2, O_vec_xor, O_UID(268) },
+ { NULL, 0, 0, NULL, 0 }
+};
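+/* Editorial note, not part of the generated table: each record above
+   appears to be { name, n, arity, ops, uid }, where "n" counts the
+   overloaded signatures registered for the intrinsic in vec.ops
+   (e.g. vec_vperm has 11 element-type variants and vec_vsl has 30
+   type/shift-width combinations) and "arity" is the operand count.
+   This reading is inferred from the data itself; the authoritative
+   layout is whatever ops-to-gp consumes.  */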
+#define LAST_O_UID O_UID(269)
diff --git a/gcc/config/rs6000/vec.ops b/gcc/config/rs6000/vec.ops
new file mode 100644
index 00000000000..5ef80a2d6b8
--- /dev/null
+++ b/gcc/config/rs6000/vec.ops
@@ -0,0 +1,1025 @@
+# APPLE LOCAL file AltiVec
+# ops-to-gp -gcc vec.ops builtin.ops
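+# Editorial note (inferred record grammar, not from the generator's own
+# documentation): each line reads
+#   <generic-op> <arg-type>... = <result-type> <specific-op> <phase> <config>
+# optionally followed by an instruction/flag tail
+#   FALSE <MOP_insn or uid> <flag> <flag> [<transform-fn>]
+# For example, "vec_add vec_s8 vec_s8 = vec_s8 vec_vaddubm ..." resolves
+# the overloaded vec_add on two vector signed char operands to the
+# specific builtin vec_vaddubm.  The exact meaning of the boolean flags
+# in the tail is internal to ops-to-gp.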
+vec_abs vec_s8 = vec_s8 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 1 FALSE FALSE transform_vec_abs
+vec_abs vec_s16 = vec_s16 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 2 FALSE FALSE transform_vec_abs
+vec_abs vec_s32 = vec_s32 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 3 FALSE FALSE transform_vec_abs
+vec_abs vec_f32 = vec_f32 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 4 FALSE FALSE transform_vec_abs
+vec_abss vec_s8 = vec_s8 vec_abss BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 5 FALSE FALSE transform_vec_abs
+vec_abss vec_s16 = vec_s16 vec_abss BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 6 FALSE FALSE transform_vec_abs
+vec_abss vec_s32 = vec_s32 vec_abss BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 7 FALSE FALSE transform_vec_abs
+vec_cmplt vec_u8 vec_u8 = vec_b8 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtub FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_u16 vec_u16 = vec_b16 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuh FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_u32 vec_u32 = vec_b32 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuw FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_s8 vec_s8 = vec_b8 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsb FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_s16 vec_s16 = vec_b16 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsh FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_s32 vec_s32 = vec_b32 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsw FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_f32 vec_f32 = vec_b32 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfp FALSE FALSE transform_vec_cmp_reverse
+vec_cmple vec_f32 vec_f32 = vec_b32 vec_cmple BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefp FALSE FALSE transform_vec_cmp_reverse
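+# Note (editorial): AltiVec has no vcmplt*/vcmple* instructions, so the
+# entries above name transform_vec_cmp_reverse; presumably vec_cmplt (a, b)
+# is emitted as the corresponding vcmpgt* with the operands swapped
+# (a < b == b > a), and vec_cmple likewise as vcmpgefp reversed.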
+vec_add vec_s8 vec_s8 = vec_s8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s8 vec_b8 = vec_s8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b8 vec_s8 = vec_s8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s16 vec_s16 = vec_s16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s16 vec_b16 = vec_s16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b16 vec_s16 = vec_s16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s32 vec_s32 = vec_s32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s32 vec_b32 = vec_s32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b32 vec_s32 = vec_s32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u8 vec_u8 = vec_u8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u8 vec_b8 = vec_u8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b8 vec_u8 = vec_u8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u16 vec_u16 = vec_u16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u16 vec_b16 = vec_u16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b16 vec_u16 = vec_u16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u32 vec_u32 = vec_u32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u32 vec_b32 = vec_u32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b32 vec_u32 = vec_u32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s8 vec_s8 = vec_s8 vec_vaddsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s8 vec_b8 = vec_s8 vec_vaddsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b8 vec_s8 = vec_s8 vec_vaddsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s16 vec_s16 = vec_s16 vec_vaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s16 vec_b16 = vec_s16 vec_vaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b16 vec_s16 = vec_s16 vec_vaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s32 vec_s32 = vec_s32 vec_vaddsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s32 vec_b32 = vec_s32 vec_vaddsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b32 vec_s32 = vec_s32 vec_vaddsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u8 vec_u8 = vec_u8 vec_vaddubs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u8 vec_b8 = vec_u8 vec_vaddubs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b8 vec_u8 = vec_u8 vec_vaddubs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u16 vec_u16 = vec_u16 vec_vadduhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u16 vec_b16 = vec_u16 vec_vadduhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b16 vec_u16 = vec_u16 vec_vadduhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u32 vec_u32 = vec_u32 vec_vadduws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u32 vec_b32 = vec_u32 vec_vadduws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b32 vec_u32 = vec_u32 vec_vadduws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s8 vec_s8 = vec_s8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s8 vec_b8 = vec_s8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b8 vec_s8 = vec_s8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s16 vec_s16 = vec_s16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s16 vec_b16 = vec_s16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b16 vec_s16 = vec_s16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s32 vec_s32 = vec_s32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s32 vec_b32 = vec_s32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b32 vec_s32 = vec_s32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u8 vec_u8 = vec_u8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u8 vec_b8 = vec_u8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b8 vec_u8 = vec_u8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u16 vec_u16 = vec_u16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u16 vec_b16 = vec_u16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b16 vec_u16 = vec_u16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u32 vec_u32 = vec_u32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u32 vec_b32 = vec_u32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b32 vec_u32 = vec_u32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s8 vec_s8 = vec_s8 vec_vsubsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s8 vec_b8 = vec_s8 vec_vsubsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b8 vec_s8 = vec_s8 vec_vsubsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s16 vec_s16 = vec_s16 vec_vsubshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s16 vec_b16 = vec_s16 vec_vsubshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b16 vec_s16 = vec_s16 vec_vsubshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s32 vec_s32 = vec_s32 vec_vsubsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s32 vec_b32 = vec_s32 vec_vsubsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b32 vec_s32 = vec_s32 vec_vsubsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u8 vec_u8 = vec_u8 vec_vsububs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u8 vec_b8 = vec_u8 vec_vsububs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b8 vec_u8 = vec_u8 vec_vsububs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u16 vec_u16 = vec_u16 vec_vsubuhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u16 vec_b16 = vec_u16 vec_vsubuhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b16 vec_u16 = vec_u16 vec_vsubuhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u32 vec_u32 = vec_u32 vec_vsubuws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u32 vec_b32 = vec_u32 vec_vsubuws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b32 vec_u32 = vec_u32 vec_vsubuws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_addc vec_u32 vec_u32 = vec_u32 vec_vaddcuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subc vec_u32 vec_u32 = vec_u32 vec_vsubcuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mulo vec_u8 vec_u8 = vec_u16 vec_vmuloub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mulo vec_u16 vec_u16 = vec_u32 vec_vmulouh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mulo vec_s8 vec_s8 = vec_s16 vec_vmulosb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mulo vec_s16 vec_s16 = vec_s32 vec_vmulosh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mule vec_u8 vec_u8 = vec_u16 vec_vmuleub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mule vec_u16 vec_u16 = vec_u32 vec_vmuleuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mule vec_s8 vec_s8 = vec_s16 vec_vmulesb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mule vec_s16 vec_s16 = vec_s32 vec_vmulesh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mladd vec_s16 vec_s16 vec_s16 = vec_s16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mladd vec_u16 vec_u16 vec_u16 = vec_u16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mladd vec_s16 vec_u16 vec_u16 = vec_s16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mladd vec_u16 vec_s16 vec_s16 = vec_s16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_madds vec_s16 vec_s16 vec_s16 = vec_s16 vec_vmhaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mradds vec_s16 vec_s16 vec_s16 = vec_s16 vec_vmhraddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msum vec_s8 vec_u8 vec_s32 = vec_s32 vec_vmsummbm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msum vec_u8 vec_u8 vec_u32 = vec_u32 vec_vmsumubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msum vec_s16 vec_s16 vec_s32 = vec_s32 vec_vmsumshm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msum vec_u16 vec_u16 vec_u32 = vec_u32 vec_vmsumuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msums vec_s16 vec_s16 vec_s32 = vec_s32 vec_vmsumshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msums vec_u16 vec_u16 vec_u32 = vec_u32 vec_vmsumuhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sums vec_s32 vec_s32 = vec_s32 vec_vsumsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sum2s vec_s32 vec_s32 = vec_s32 vec_vsum2sws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sum4s vec_s8 vec_s32 = vec_s32 vec_vsum4sbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sum4s vec_u8 vec_u32 = vec_u32 vec_vsum4ubs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sum4s vec_s16 vec_s32 = vec_s32 vec_vsum4shs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_s8 vec_s8 = vec_s8 vec_vavgsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_s16 vec_s16 = vec_s16 vec_vavgsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_u8 vec_u8 = vec_u8 vec_vavgub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_u16 vec_u16 = vec_u16 vec_vavguh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_s32 vec_s32 = vec_s32 vec_vavgsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_u32 vec_u32 = vec_u32 vec_vavguw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s8 vec_s8 = vec_s8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s8 vec_b8 = vec_s8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b8 vec_s8 = vec_s8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u8 vec_u8 = vec_u8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b8 vec_u8 = vec_u8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u8 vec_b8 = vec_u8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b8 vec_b8 = vec_b8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s16 vec_s16 = vec_s16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s16 vec_b16 = vec_s16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b16 vec_s16 = vec_s16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u16 vec_u16 = vec_u16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b16 vec_u16 = vec_u16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u16 vec_b16 = vec_u16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b16 vec_b16 = vec_b16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s32 vec_s32 = vec_s32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s32 vec_b32 = vec_s32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b32 vec_s32 = vec_s32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u32 vec_u32 = vec_u32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b32 vec_u32 = vec_u32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u32 vec_b32 = vec_u32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b32 vec_b32 = vec_b32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_f32 vec_f32 = vec_f32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_f32 vec_b32 = vec_f32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b32 vec_f32 = vec_f32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s8 vec_s8 = vec_s8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s8 vec_b8 = vec_s8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b8 vec_s8 = vec_s8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u8 vec_u8 = vec_u8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b8 vec_u8 = vec_u8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u8 vec_b8 = vec_u8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b8 vec_b8 = vec_b8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s16 vec_s16 = vec_s16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s16 vec_b16 = vec_s16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b16 vec_s16 = vec_s16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u16 vec_u16 = vec_u16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b16 vec_u16 = vec_u16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u16 vec_b16 = vec_u16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b16 vec_b16 = vec_b16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s32 vec_s32 = vec_s32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s32 vec_b32 = vec_s32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b32 vec_s32 = vec_s32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u32 vec_u32 = vec_u32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b32 vec_u32 = vec_u32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u32 vec_b32 = vec_u32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b32 vec_b32 = vec_b32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_f32 vec_f32 = vec_f32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_f32 vec_b32 = vec_f32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b32 vec_f32 = vec_f32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s8 vec_s8 = vec_s8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s8 vec_b8 = vec_s8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b8 vec_s8 = vec_s8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u8 vec_u8 = vec_u8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b8 vec_u8 = vec_u8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u8 vec_b8 = vec_u8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b8 vec_b8 = vec_b8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s16 vec_s16 = vec_s16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s16 vec_b16 = vec_s16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b16 vec_s16 = vec_s16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u16 vec_u16 = vec_u16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b16 vec_u16 = vec_u16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u16 vec_b16 = vec_u16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b16 vec_b16 = vec_b16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s32 vec_s32 = vec_s32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s32 vec_b32 = vec_s32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b32 vec_s32 = vec_s32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u32 vec_u32 = vec_u32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b32 vec_u32 = vec_u32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u32 vec_b32 = vec_u32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b32 vec_b32 = vec_b32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_f32 vec_f32 = vec_f32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_f32 vec_b32 = vec_f32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b32 vec_f32 = vec_f32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s8 vec_s8 = vec_s8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s8 vec_b8 = vec_s8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b8 vec_s8 = vec_s8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u8 vec_u8 = vec_u8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b8 vec_u8 = vec_u8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u8 vec_b8 = vec_u8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b8 vec_b8 = vec_b8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s16 vec_s16 = vec_s16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s16 vec_b16 = vec_s16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b16 vec_s16 = vec_s16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u16 vec_u16 = vec_u16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b16 vec_u16 = vec_u16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u16 vec_b16 = vec_u16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b16 vec_b16 = vec_b16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s32 vec_s32 = vec_s32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s32 vec_b32 = vec_s32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b32 vec_s32 = vec_s32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u32 vec_u32 = vec_u32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b32 vec_u32 = vec_u32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u32 vec_b32 = vec_u32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b32 vec_b32 = vec_b32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_f32 vec_f32 = vec_f32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_f32 vec_b32 = vec_f32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b32 vec_f32 = vec_f32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_u8 vec_u8 = vec_u8 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_s8 vec_s8 = vec_s8 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_b8 vec_b8 = vec_b8 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_u16 vec_u16 = vec_u16 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_s16 vec_s16 = vec_s16 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_b16 vec_b16 = vec_b16 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_u32 vec_u32 = vec_u32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_s32 vec_s32 = vec_s32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_b32 vec_b32 = vec_b32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_f32 vec_f32 = vec_f32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_u8 vec_u8 = vec_u8 vec_vrlb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_u16 vec_u16 = vec_u16 vec_vrlh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_u32 vec_u32 = vec_u32 vec_vrlw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_s8 vec_u8 = vec_s8 vec_vrlb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_s16 vec_u16 = vec_s16 vec_vrlh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_s32 vec_u32 = vec_s32 vec_vrlw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_u8 vec_u8 = vec_u8 vec_vslb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_u16 vec_u16 = vec_u16 vec_vslh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_u32 vec_u32 = vec_u32 vec_vslw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_s8 vec_u8 = vec_s8 vec_vslb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_s16 vec_u16 = vec_s16 vec_vslh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_s32 vec_u32 = vec_s32 vec_vslw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u8 vec_u8 = vec_u8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u16 vec_u8 = vec_u16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u32 vec_u8 = vec_u32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s8 vec_u8 = vec_s8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s16 vec_u8 = vec_s16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s32 vec_u8 = vec_s32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b8 vec_u8 = vec_b8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b16 vec_u8 = vec_b16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b32 vec_u8 = vec_b32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_p16 vec_u8 = vec_p16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u8 vec_u16 = vec_u8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u16 vec_u16 = vec_u16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u32 vec_u16 = vec_u32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s8 vec_u16 = vec_s8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s16 vec_u16 = vec_s16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s32 vec_u16 = vec_s32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b8 vec_u16 = vec_b8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b16 vec_u16 = vec_b16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b32 vec_u16 = vec_b32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_p16 vec_u16 = vec_p16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u8 vec_u32 = vec_u8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u16 vec_u32 = vec_u16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u32 vec_u32 = vec_u32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s8 vec_u32 = vec_s8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s16 vec_u32 = vec_s16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s32 vec_u32 = vec_s32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b8 vec_u32 = vec_b8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b16 vec_u32 = vec_b16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b32 vec_u32 = vec_b32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_p16 vec_u32 = vec_p16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_u8 vec_u8 = vec_u8 vec_vsrb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_u16 vec_u16 = vec_u16 vec_vsrh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_u32 vec_u32 = vec_u32 vec_vsrw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_s8 vec_u8 = vec_s8 vec_vsrb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_s16 vec_u16 = vec_s16 vec_vsrh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_s32 vec_u32 = vec_s32 vec_vsrw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_u8 vec_u8 = vec_u8 vec_vsrab BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_u16 vec_u16 = vec_u16 vec_vsrah BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_u32 vec_u32 = vec_u32 vec_vsraw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_s8 vec_u8 = vec_s8 vec_vsrab BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_s16 vec_u16 = vec_s16 vec_vsrah BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_s32 vec_u32 = vec_s32 vec_vsraw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u8 vec_u8 = vec_u8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u16 vec_u8 = vec_u16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u32 vec_u8 = vec_u32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s8 vec_u8 = vec_s8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s16 vec_u8 = vec_s16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s32 vec_u8 = vec_s32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b8 vec_u8 = vec_b8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b16 vec_u8 = vec_b16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b32 vec_u8 = vec_b32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_p16 vec_u8 = vec_p16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u8 vec_u16 = vec_u8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u16 vec_u16 = vec_u16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u32 vec_u16 = vec_u32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s8 vec_u16 = vec_s8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s16 vec_u16 = vec_s16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s32 vec_u16 = vec_s32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b8 vec_u16 = vec_b8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b16 vec_u16 = vec_b16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b32 vec_u16 = vec_b32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_p16 vec_u16 = vec_p16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u8 vec_u32 = vec_u8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u16 vec_u32 = vec_u16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u32 vec_u32 = vec_u32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s8 vec_u32 = vec_s8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s16 vec_u32 = vec_s16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s32 vec_u32 = vec_s32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b8 vec_u32 = vec_b8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b16 vec_u32 = vec_b16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b32 vec_u32 = vec_b32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_p16 vec_u32 = vec_p16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_u8 vec_u8 = vec_b8 vec_vcmpgtub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_u16 vec_u16 = vec_b16 vec_vcmpgtuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_u32 vec_u32 = vec_b32 vec_vcmpgtuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_s8 vec_s8 = vec_b8 vec_vcmpgtsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_s16 vec_s16 = vec_b16 vec_vcmpgtsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_s32 vec_s32 = vec_b32 vec_vcmpgtsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_u8 vec_u8 = vec_b8 vec_vcmpequb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_u16 vec_u16 = vec_b16 vec_vcmpequh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_u32 vec_u32 = vec_b32 vec_vcmpequw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_s8 vec_s8 = vec_b8 vec_vcmpequb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_s16 vec_s16 = vec_b16 vec_vcmpequh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_s32 vec_s32 = vec_b32 vec_vcmpequw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b8 vec_b8 vec_b8 = vec_b8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b8 vec_b8 vec_u8 = vec_b8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u8 vec_u8 vec_u8 = vec_u8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u8 vec_u8 vec_b8 = vec_u8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s8 vec_s8 vec_u8 = vec_s8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s8 vec_s8 vec_b8 = vec_s8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b16 vec_b16 vec_b16 = vec_b16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b16 vec_b16 vec_u16 = vec_b16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u16 vec_u16 vec_u16 = vec_u16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u16 vec_u16 vec_b16 = vec_u16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s16 vec_s16 vec_u16 = vec_s16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s16 vec_s16 vec_b16 = vec_s16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b32 vec_b32 vec_b32 = vec_b32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b32 vec_b32 vec_u32 = vec_b32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u32 vec_u32 vec_u32 = vec_u32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u32 vec_u32 vec_b32 = vec_u32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s32 vec_s32 vec_u32 = vec_s32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s32 vec_s32 vec_b32 = vec_s32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_f32 vec_f32 vec_b32 = vec_f32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_f32 vec_f32 vec_u32 = vec_f32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_u16 vec_u16 = vec_u8 vec_vpkuhum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_u32 vec_u32 = vec_u16 vec_vpkuwum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_s16 vec_s16 = vec_s8 vec_vpkuhum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_s32 vec_s32 = vec_s16 vec_vpkuwum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_b16 vec_b16 = vec_b8 vec_vpkuhum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_b32 vec_b32 = vec_b16 vec_vpkuwum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packs vec_u16 vec_u16 = vec_u8 vec_vpkuhus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packs vec_u32 vec_u32 = vec_u16 vec_vpkuwus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packsu vec_u16 vec_u16 = vec_u8 vec_vpkuhus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packsu vec_u32 vec_u32 = vec_u16 vec_vpkuwus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packs vec_s16 vec_s16 = vec_s8 vec_vpkshss BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packs vec_s32 vec_s32 = vec_s16 vec_vpkswss BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packsu vec_s16 vec_s16 = vec_u8 vec_vpkshus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packsu vec_s32 vec_s32 = vec_u16 vec_vpkswus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packpx vec_u32 vec_u32 = vec_p16 vec_vpkpx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_s8 = vec_s16 vec_vupkhsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_s16 = vec_s32 vec_vupkhsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_b8 = vec_b16 vec_vupkhsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_b16 = vec_b32 vec_vupkhsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_p16 = vec_u32 vec_vupkhpx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_s8 = vec_s16 vec_vupklsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_s16 = vec_s32 vec_vupklsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_b8 = vec_b16 vec_vupklsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_b16 = vec_b32 vec_vupklsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_p16 = vec_u32 vec_vupklpx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_u8 vec_u8 = vec_u8 vec_vmrghb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_u16 vec_u16 = vec_u16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_u32 vec_u32 = vec_u32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_s8 vec_s8 = vec_s8 vec_vmrghb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_s16 vec_s16 = vec_s16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_s32 vec_s32 = vec_s32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_f32 vec_f32 = vec_f32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_p16 vec_p16 = vec_p16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_b8 vec_b8 = vec_b8 vec_vmrghb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_b16 vec_b16 = vec_b16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_b32 vec_b32 = vec_b32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpack2uh vec_u8 vec_u8 = vec_u16 vec_unpack2uh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghb
+vec_unpack2uh vec_u16 vec_u16 = vec_u32 vec_unpack2uh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghh
+vec_unpack2sh vec_u8 vec_u8 = vec_s16 vec_unpack2sh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghb
+vec_unpack2sh vec_u16 vec_u16 = vec_s32 vec_unpack2sh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghh
+vec_mergel vec_u8 vec_u8 = vec_u8 vec_vmrglb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_u16 vec_u16 = vec_u16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_u32 vec_u32 = vec_u32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_s8 vec_s8 = vec_s8 vec_vmrglb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_s16 vec_s16 = vec_s16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_s32 vec_s32 = vec_s32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_f32 vec_f32 = vec_f32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_p16 vec_p16 = vec_p16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_b8 vec_b8 = vec_b8 vec_vmrglb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_b16 vec_b16 = vec_b16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_b32 vec_b32 = vec_b32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpack2ul vec_u8 vec_u8 = vec_u16 vec_unpack2ul BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglb
+vec_unpack2ul vec_u16 vec_u16 = vec_u32 vec_unpack2ul BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglh
+vec_unpack2sl vec_u8 vec_u8 = vec_s16 vec_unpack2sl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglb
+vec_unpack2sl vec_u16 vec_u16 = vec_s32 vec_unpack2sl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglh
+vec_splat vec_u8 immed_u5 = vec_u8 vec_vspltb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_u16 immed_u5 = vec_u16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_u32 immed_u5 = vec_u32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_s8 immed_u5 = vec_s8 vec_vspltb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_s16 immed_u5 = vec_s16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_s32 immed_u5 = vec_s32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_b8 immed_u5 = vec_b8 vec_vspltb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_b16 immed_u5 = vec_b16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_b32 immed_u5 = vec_b32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_p16 immed_u5 = vec_p16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_f32 immed_u5 = vec_f32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat_s8 immed_s5 = vec_s8 vec_vspltisb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat_s16 immed_s5 = vec_s16 vec_vspltish BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat_s32 immed_s5 = vec_s32 vec_vspltisw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat_u8 immed_s5 = vec_u8 vec_splat_u8 BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vspltisb
+vec_splat_u16 immed_s5 = vec_u16 vec_splat_u16 BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vspltish
+vec_splat_u32 immed_s5 = vec_u32 vec_splat_u32 BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vspltisw
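+# Note (editorial): AltiVec has no unsigned splat-immediate instruction,
+# so vec_splat_u8/u16/u32 reuse the signed vspltis[bhw] machine ops named
+# via the MOP_ fields above; the 5-bit immediate is the same bit pattern,
+# only the result type differs.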
+vec_perm vec_u8 vec_u8 vec_u8 = vec_u8 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_u16 vec_u16 vec_u8 = vec_u16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_u32 vec_u32 vec_u8 = vec_u32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_s8 vec_s8 vec_u8 = vec_s8 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_s16 vec_s16 vec_u8 = vec_s16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_s32 vec_s32 vec_u8 = vec_s32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_b8 vec_b8 vec_u8 = vec_b8 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_b16 vec_b16 vec_u8 = vec_b16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_b32 vec_b32 vec_u8 = vec_b32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_p16 vec_p16 vec_u8 = vec_p16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_f32 vec_f32 vec_u8 = vec_f32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_u8 vec_u8 immed_u4 = vec_u8 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_u16 vec_u16 immed_u4 = vec_u16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_u32 vec_u32 immed_u4 = vec_u32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_s8 vec_s8 immed_u4 = vec_s8 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_s16 vec_s16 immed_u4 = vec_s16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_s32 vec_s32 immed_u4 = vec_s32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_p16 vec_p16 immed_u4 = vec_p16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_f32 vec_f32 immed_u4 = vec_f32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_b8 vec_b8 immed_u4 = vec_b8 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_b16 vec_b16 immed_u4 = vec_b16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_b32 vec_b32 immed_u4 = vec_b32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u8 vec_u8 = vec_u8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u16 vec_u8 = vec_u16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u32 vec_u8 = vec_u32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s8 vec_u8 = vec_s8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s16 vec_u8 = vec_s16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s32 vec_u8 = vec_s32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_p16 vec_u8 = vec_p16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u8 vec_s8 = vec_u8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u16 vec_s8 = vec_u16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u32 vec_s8 = vec_u32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s8 vec_s8 = vec_s8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s16 vec_s8 = vec_s16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s32 vec_s8 = vec_s32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_p16 vec_s8 = vec_p16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_f32 vec_u8 = vec_f32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_f32 vec_s8 = vec_f32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u8 vec_u8 = vec_u8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u16 vec_u8 = vec_u16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u32 vec_u8 = vec_u32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s8 vec_u8 = vec_s8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s16 vec_u8 = vec_s16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s32 vec_u8 = vec_s32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_p16 vec_u8 = vec_p16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u8 vec_s8 = vec_u8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u16 vec_s8 = vec_u16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u32 vec_s8 = vec_u32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s8 vec_s8 = vec_s8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s16 vec_s8 = vec_s16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s32 vec_s8 = vec_s32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_p16 vec_s8 = vec_p16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_f32 vec_u8 = vec_f32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_f32 vec_s8 = vec_f32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u8 vec_u8 = vec_u8 vec_vmaxub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u8 vec_b8 = vec_u8 vec_vmaxub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b8 vec_u8 = vec_u8 vec_vmaxub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s8 vec_s8 = vec_s8 vec_vmaxsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s8 vec_b8 = vec_s8 vec_vmaxsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b8 vec_s8 = vec_s8 vec_vmaxsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u16 vec_u16 = vec_u16 vec_vmaxuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u16 vec_b16 = vec_u16 vec_vmaxuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b16 vec_u16 = vec_u16 vec_vmaxuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s16 vec_s16 = vec_s16 vec_vmaxsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s16 vec_b16 = vec_s16 vec_vmaxsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b16 vec_s16 = vec_s16 vec_vmaxsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u32 vec_u32 = vec_u32 vec_vmaxuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u32 vec_b32 = vec_u32 vec_vmaxuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b32 vec_u32 = vec_u32 vec_vmaxuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s32 vec_s32 = vec_s32 vec_vmaxsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s32 vec_b32 = vec_s32 vec_vmaxsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b32 vec_s32 = vec_s32 vec_vmaxsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u8 vec_u8 = vec_u8 vec_vminub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u8 vec_b8 = vec_u8 vec_vminub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b8 vec_u8 = vec_u8 vec_vminub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s8 vec_s8 = vec_s8 vec_vminsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s8 vec_b8 = vec_s8 vec_vminsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b8 vec_s8 = vec_s8 vec_vminsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u16 vec_u16 = vec_u16 vec_vminuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u16 vec_b16 = vec_u16 vec_vminuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b16 vec_u16 = vec_u16 vec_vminuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s16 vec_s16 = vec_s16 vec_vminsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s16 vec_b16 = vec_s16 vec_vminsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b16 vec_s16 = vec_s16 vec_vminsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u32 vec_u32 = vec_u32 vec_vminuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u32 vec_b32 = vec_u32 vec_vminuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b32 vec_u32 = vec_u32 vec_vminuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s32 vec_s32 = vec_s32 vec_vminsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s32 vec_b32 = vec_s32 vec_vminsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b32 vec_s32 = vec_s32 vec_vminsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_lde int const_unsigned_char_ptr = vec_u8_load_op vec_lvebx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvebx TRUE FALSE
+vec_lde int const_unsigned_short_ptr = vec_u16_load_op vec_lvehx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvehx TRUE FALSE
+vec_lde int const_unsigned_int_ptr = vec_u32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_lde int const_unsigned_long_ptr = vec_u32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_lde int const_signed_char_ptr = vec_s8_load_op vec_lvebx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvebx TRUE FALSE
+vec_lde int const_short_ptr = vec_s16_load_op vec_lvehx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvehx TRUE FALSE
+vec_lde int const_int_ptr = vec_s32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_lde int const_long_ptr = vec_s32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_lde int const_float_ptr = vec_f32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_ld int const_unsigned_char_ptr = vec_u8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_unsigned_short_ptr = vec_u16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_unsigned_int_ptr = vec_u32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_unsigned_long_ptr = vec_u32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_signed_char_ptr = vec_s8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_short_ptr = vec_s16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_int_ptr = vec_s32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_long_ptr = vec_s32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_float_ptr = vec_f32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ldl int const_unsigned_char_ptr = vec_u8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_unsigned_short_ptr = vec_u16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_unsigned_int_ptr = vec_u32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_unsigned_long_ptr = vec_u32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_signed_char_ptr = vec_s8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_short_ptr = vec_s16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_int_ptr = vec_s32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_long_ptr = vec_s32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_float_ptr = vec_f32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ld int const_vec_u8_ptr = vec_u8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_u16_ptr = vec_u16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_u32_ptr = vec_u32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_s8_ptr = vec_s8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_s16_ptr = vec_s16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_s32_ptr = vec_s32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_p16_ptr = vec_p16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_b8_ptr = vec_b8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_b16_ptr = vec_b16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_b32_ptr = vec_b32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_f32_ptr = vec_f32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ldl int const_vec_u8_ptr = vec_u8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_u16_ptr = vec_u16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_u32_ptr = vec_u32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_s8_ptr = vec_s8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_s16_ptr = vec_s16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_s32_ptr = vec_s32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_p16_ptr = vec_p16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_b8_ptr = vec_b8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_b16_ptr = vec_b16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_b32_ptr = vec_b32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_f32_ptr = vec_f32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
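+# Note (editorial): vec_ld maps to lvx and vec_ldl to lvxl, the
+# "load last" form that hints the cache line as least-recently-used.
+# Both instructions ignore the low four bits of the effective address,
+# which is presumably what the shared transform_lvx hook accounts for.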
+vec_ste vec_u8 int unsigned_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_u16 int unsigned_short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_u32 int unsigned_int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_u32 int unsigned_long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_s8 int signed_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_s16 int short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_s32 int int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_s32 int long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_f32 int float_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_p16 int short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_p16 int unsigned_short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b8 int unsigned_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b8 int signed_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b16 int unsigned_short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b16 int short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b32 int unsigned_int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b32 int unsigned_long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b32 int int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b32 int long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_st vec_u8 int unsigned_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u16 int unsigned_short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u32 int unsigned_int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u32 int unsigned_long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s8 int signed_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s16 int short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s32 int int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s32 int long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_f32 int float_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_p16 int short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_p16 int unsigned_short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b8 int unsigned_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b8 int signed_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b16 int unsigned_short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b16 int short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int unsigned_int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int unsigned_long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_stl vec_u8 int unsigned_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u16 int unsigned_short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u32 int unsigned_int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u32 int unsigned_long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s8 int signed_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s16 int short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s32 int int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s32 int long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_f32 int float_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_p16 int short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_p16 int unsigned_short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b8 int unsigned_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b8 int signed_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b16 int unsigned_short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b16 int short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int unsigned_int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int unsigned_long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_st vec_u8 int vec_u8_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u16 int vec_u16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u32 int vec_u32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s8 int vec_s8_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s16 int vec_s16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s32 int vec_s32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b8 int vec_b8_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b16 int vec_b16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int vec_b32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_p16 int vec_p16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_f32 int vec_f32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_stl vec_u8 int vec_u8_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u16 int vec_u16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u32 int vec_u32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s8 int vec_s8_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s16 int vec_s16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s32 int vec_s32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b8 int vec_b8_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b16 int vec_b16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int vec_b32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_p16 int vec_p16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_f32 int vec_f32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_lvsl int const_volatile_unsigned_char_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_unsigned_short_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_unsigned_int_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_unsigned_long_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_signed_char_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_short_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_int_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_long_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_float_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsr int const_volatile_unsigned_char_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_unsigned_short_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_unsigned_int_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_unsigned_long_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_signed_char_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_short_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_int_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_long_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_float_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_mfvscr = volatile_vec_u16 vec_mfvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_u8 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_u16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_u32 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_s8 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_s16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_s32 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_b8 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_b16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_b32 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_p16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_dst const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_short_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_int_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_long_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_float_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dstt const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_short_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_int_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_long_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_float_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstst const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_short_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_int_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_long_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_float_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dststt const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_short_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_int_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_long_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_float_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dst const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dstt const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstst const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dststt const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dss immed_u2 = volatile_void_load_op vec_dss BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_dssall = volatile_void_load_op vec_dssall BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_f32 vec_f32 = vec_f32 vec_vaddfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_f32 vec_f32 = vec_f32 vec_vsubfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_madd vec_f32 vec_f32 vec_f32 = vec_f32 vec_vmaddfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nmsub vec_f32 vec_f32 vec_f32 = vec_f32 vec_vnmsubfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_f32 vec_f32 = vec_b32 vec_vcmpgtfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpge vec_f32 vec_f32 = vec_b32 vec_vcmpgefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_f32 vec_f32 = vec_b32 vec_vcmpeqfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpb vec_f32 vec_f32 = vec_s32 vec_vcmpbfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_f32 vec_f32 = vec_f32 vec_vmaxfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_f32 vec_f32 = vec_f32 vec_vminfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_re vec_f32 = vec_f32 vec_vrefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rsqrte vec_f32 = vec_f32 vec_vrsqrtefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_loge vec_f32 = vec_f32 vec_vlogefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_expte vec_f32 = vec_f32 vec_vexptefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_trunc vec_f32 = vec_f32 vec_vrfiz BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_round vec_f32 = vec_f32 vec_vrfin BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ceil vec_f32 = vec_f32 vec_vrfip BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_floor vec_f32 = vec_f32 vec_vrfim BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ctf vec_u32 immed_u5 = vec_f32 vec_vcfux BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ctf vec_s32 immed_u5 = vec_f32 vec_vcfsx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ctu vec_f32 immed_u5 = vec_u32 vec_vctuxs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cts vec_f32 immed_u5 = vec_s32 vec_vctsxs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_all_gt vec_u8 vec_u8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_gt vec_u8 vec_b8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_gt vec_b8 vec_u8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_le vec_u8 vec_u8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_le vec_u8 vec_b8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_le vec_b8 vec_u8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_gt vec_u8 vec_u8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_gt vec_u8 vec_b8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_gt vec_b8 vec_u8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_le vec_u8 vec_u8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_le vec_u8 vec_b8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_le vec_b8 vec_u8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_gt vec_s8 vec_s8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_gt vec_s8 vec_b8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_gt vec_b8 vec_s8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_le vec_s8 vec_s8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_le vec_s8 vec_b8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_le vec_b8 vec_s8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_gt vec_s8 vec_s8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_gt vec_s8 vec_b8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_gt vec_b8 vec_s8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_le vec_s8 vec_s8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_le vec_s8 vec_b8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_le vec_b8 vec_s8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_lt vec_u8 vec_u8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_lt vec_u8 vec_b8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_lt vec_b8 vec_u8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_ge vec_u8 vec_u8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_ge vec_u8 vec_b8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_ge vec_b8 vec_u8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_lt vec_u8 vec_u8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_lt vec_u8 vec_b8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_lt vec_b8 vec_u8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_ge vec_u8 vec_u8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_ge vec_u8 vec_b8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_ge vec_b8 vec_u8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_lt vec_s8 vec_s8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_lt vec_s8 vec_b8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_lt vec_b8 vec_s8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_ge vec_s8 vec_s8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_ge vec_s8 vec_b8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_ge vec_b8 vec_s8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_lt vec_s8 vec_s8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_lt vec_s8 vec_b8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_lt vec_b8 vec_s8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_ge vec_s8 vec_s8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_ge vec_s8 vec_b8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_ge vec_b8 vec_s8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_gt vec_u16 vec_u16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_gt vec_u16 vec_b16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_gt vec_b16 vec_u16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_le vec_u16 vec_u16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_le vec_u16 vec_b16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_le vec_b16 vec_u16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_gt vec_u16 vec_u16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_gt vec_u16 vec_b16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_gt vec_b16 vec_u16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_le vec_u16 vec_u16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_le vec_u16 vec_b16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_le vec_b16 vec_u16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_gt vec_s16 vec_s16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_gt vec_s16 vec_b16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_gt vec_b16 vec_s16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_le vec_s16 vec_s16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_le vec_s16 vec_b16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_le vec_b16 vec_s16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_gt vec_s16 vec_s16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_gt vec_s16 vec_b16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_gt vec_b16 vec_s16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_le vec_s16 vec_s16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_le vec_s16 vec_b16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_le vec_b16 vec_s16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_lt vec_u16 vec_u16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_lt vec_u16 vec_b16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_lt vec_b16 vec_u16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_ge vec_u16 vec_u16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_ge vec_u16 vec_b16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_ge vec_b16 vec_u16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_lt vec_u16 vec_u16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_lt vec_u16 vec_b16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_lt vec_b16 vec_u16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_ge vec_u16 vec_u16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_ge vec_u16 vec_b16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_ge vec_b16 vec_u16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_lt vec_s16 vec_s16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_lt vec_s16 vec_b16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_lt vec_b16 vec_s16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_ge vec_s16 vec_s16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_ge vec_s16 vec_b16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_ge vec_b16 vec_s16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_lt vec_s16 vec_s16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_lt vec_s16 vec_b16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_lt vec_b16 vec_s16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_ge vec_s16 vec_s16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_ge vec_s16 vec_b16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_ge vec_b16 vec_s16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_gt vec_u32 vec_u32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_gt vec_u32 vec_b32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_gt vec_b32 vec_u32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_le vec_u32 vec_u32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_le vec_u32 vec_b32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_le vec_b32 vec_u32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_gt vec_u32 vec_u32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_gt vec_u32 vec_b32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_gt vec_b32 vec_u32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_le vec_u32 vec_u32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_le vec_u32 vec_b32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_le vec_b32 vec_u32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_gt vec_s32 vec_s32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_gt vec_s32 vec_b32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_gt vec_b32 vec_s32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_le vec_s32 vec_s32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_le vec_s32 vec_b32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_le vec_b32 vec_s32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_gt vec_s32 vec_s32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_gt vec_s32 vec_b32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_gt vec_b32 vec_s32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_le vec_s32 vec_s32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_le vec_s32 vec_b32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_le vec_b32 vec_s32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_lt vec_u32 vec_u32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_lt vec_u32 vec_b32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_lt vec_b32 vec_u32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_ge vec_u32 vec_u32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_ge vec_u32 vec_b32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_ge vec_b32 vec_u32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_lt vec_u32 vec_u32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_lt vec_u32 vec_b32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_lt vec_b32 vec_u32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_ge vec_u32 vec_u32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_ge vec_u32 vec_b32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_ge vec_b32 vec_u32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_lt vec_s32 vec_s32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_lt vec_s32 vec_b32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_lt vec_b32 vec_s32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_ge vec_s32 vec_s32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_ge vec_s32 vec_b32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_ge vec_b32 vec_s32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_lt vec_s32 vec_s32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_lt vec_s32 vec_b32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_lt vec_b32 vec_s32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_ge vec_s32 vec_s32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_ge vec_s32 vec_b32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_ge vec_b32 vec_s32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_eq vec_u8 vec_u8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_u8 vec_b8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_b8 vec_u8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_b8 vec_b8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_u8 vec_u8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_u8 vec_b8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_b8 vec_u8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_b8 vec_b8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_u8 vec_u8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_u8 vec_b8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_b8 vec_u8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_b8 vec_b8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_u8 vec_u8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_u8 vec_b8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_b8 vec_u8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_b8 vec_b8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_s8 vec_s8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_s8 vec_b8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_b8 vec_s8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_s8 vec_s8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_s8 vec_b8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_b8 vec_s8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_s8 vec_s8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_s8 vec_b8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_b8 vec_s8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_s8 vec_s8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_s8 vec_b8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_b8 vec_s8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_u16 vec_u16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_u16 vec_b16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_b16 vec_u16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_b16 vec_b16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_p16 vec_p16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_u16 vec_u16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_u16 vec_b16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_b16 vec_u16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_b16 vec_b16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_p16 vec_p16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_u16 vec_u16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_u16 vec_b16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_b16 vec_u16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_b16 vec_b16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_p16 vec_p16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_u16 vec_u16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_u16 vec_b16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_b16 vec_u16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_b16 vec_b16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_p16 vec_p16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_s16 vec_s16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_s16 vec_b16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_b16 vec_s16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_s16 vec_s16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_s16 vec_b16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_b16 vec_s16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_s16 vec_s16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_s16 vec_b16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_b16 vec_s16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_s16 vec_s16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_s16 vec_b16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_b16 vec_s16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_u32 vec_u32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_u32 vec_b32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_b32 vec_u32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_b32 vec_b32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_u32 vec_u32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_u32 vec_b32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_b32 vec_u32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_b32 vec_b32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_u32 vec_u32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_u32 vec_b32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_b32 vec_u32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_b32 vec_b32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_u32 vec_u32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_u32 vec_b32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_b32 vec_u32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_b32 vec_b32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_s32 vec_s32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_s32 vec_b32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_b32 vec_s32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_s32 vec_s32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_s32 vec_b32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_b32 vec_s32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_s32 vec_s32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_s32 vec_b32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_b32 vec_s32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_s32 vec_s32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_s32 vec_b32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_b32 vec_s32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_gt vec_f32 vec_f32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_all_ngt vec_f32 vec_f32 = cc26t vec_all_ngt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_any_ngt vec_f32 vec_f32 = cc24f vec_any_ngt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_any_gt vec_f32 vec_f32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_all_lt vec_f32 vec_f32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_all_nlt vec_f32 vec_f32 = cc26tr vec_all_nlt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_any_nlt vec_f32 vec_f32 = cc24fr vec_any_nlt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_any_lt vec_f32 vec_f32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_all_ge vec_f32 vec_f32 = cc24t vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_all_nge vec_f32 vec_f32 = cc26t vec_all_nge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_any_nge vec_f32 vec_f32 = cc24f vec_any_nge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_any_ge vec_f32 vec_f32 = cc26f vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_all_le vec_f32 vec_f32 = cc24tr vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_all_nle vec_f32 vec_f32 = cc26tr vec_all_nle BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_any_nle vec_f32 vec_f32 = cc24fr vec_any_nle BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_any_le vec_f32 vec_f32 = cc26fr vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_all_eq vec_f32 vec_f32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_all_ne vec_f32 vec_f32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_any_ne vec_f32 vec_f32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_any_eq vec_f32 vec_f32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_all_numeric vec_f32 = cc24td vec_all_numeric BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_all_nan vec_f32 = cc26td vec_all_nan BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_any_nan vec_f32 = cc24fd vec_any_nan BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_any_numeric vec_f32 = cc26fd vec_any_numeric BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_all_in vec_f32 vec_f32 = cc26t vec_all_in BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpbfpD
+vec_any_out vec_f32 vec_f32 = cc26f vec_any_out BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpbfpD
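That closes the vec.ops operator table. A hedged aside, not part of the patch: each row above binds one overloaded AltiVec intrinsic signature to a machine op (the MOP_* column), an insertion phase (BUILTIN_AFTER_TRAVERSE), and, for the stores, the transform_stvx hook; this is how a front end built from the table resolves an overloaded call site. A minimal C sketch of the kind of code those signatures admit, assuming 16-byte-aligned buffers and n divisible by 4 (lvx/stvx ignore the low four address bits); the function name and the dst stream descriptor are illustrative, not taken from the patch:

#include <altivec.h>

/* y[i] = a*x[i] + y[i]; exercises the vec_st, vec_madd and
   vec_dst/vec_dss rows above (vec_ld is defined earlier in the
   table).  The last argument of vec_dst/vec_dss must be a 2-bit
   literal, matching the immed_u2 operand kind in the rows above.  */
void saxpy_vmx (float a, const float *x, float *y, int n)
{
  vector float va = { a, a, a, a };   /* GNU-style vector initializer */
  int i;
  /* Stream descriptor: block size << 24 | block count << 16 | stride;
     the particular values here are arbitrary.  */
  vec_dst (x, (4 << 24) | (4 << 16) | 64, 0);
  for (i = 0; i < n; i += 4)
    {
      vector float vx = vec_ld (0, &x[i]);          /* lvx */
      vector float vy = vec_ld (0, &y[i]);
      vec_st (vec_madd (va, vx, vy), 0, &y[i]);     /* vmaddfp; stvx */
    }
  vec_dss (0);                        /* shut down prefetch stream 0 */
}

Substituting vec_stl for vec_st in the loop would emit stvxl instead (the MOP_stvxl rows at the top of the hunk), marking the stored cache lines least-recently-used so they are evicted sooner.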
diff --git a/gcc/config/rs6000/x-darwin b/gcc/config/rs6000/x-darwin
index e133c21f928..f9197ab1ccb 100644
--- a/gcc/config/rs6000/x-darwin
+++ b/gcc/config/rs6000/x-darwin
@@ -1,4 +1,6 @@
-host-darwin.o : $(srcdir)/config/rs6000/host-darwin.c $(CONFIG_H) $(SYSTEM_H) \
- coretypes.h hosthooks.h hosthooks-def.h toplev.h diagnostic.h $(HOOKS_H)
- $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
- $(srcdir)/config/rs6000/host-darwin.c
+ # APPLE LOCAL begin mainline 2005-04-06 4071679
+host-ppc-darwin.o : $(srcdir)/config/rs6000/host-darwin.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h hosthooks.h $(HOSTHOOKS_DEF_H) toplev.h \
+ diagnostic.h config/host-darwin.h
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -o $@
+ # APPLE LOCAL end mainline 2005-04-06 4071679
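A hedged note on the predicate rows in the vec.ops hunk above, again not part of the patch: the result kinds cc24t, cc26t, cc24f, cc26f name a condition-register bit and the sense in which it is tested. After a dot-form vector compare, CR field 6 (CR bits 24-27) holds the summary: bit 24 set means every element compared true, bit 26 set means every element compared false. The t/f suffix tests that bit for 1 or 0, the r suffix marks predicates implemented with the operands reversed, and the d suffix marks the single-operand NaN checks that compare a value against itself. A minimal C sketch of two predicates the table folds onto one encoding, MOP_vcmpequbD; the function names are illustrative only:

#include <altivec.h>

/* vec_all_eq on byte vectors is the "cc24t ... MOP_vcmpequbD" row:
   vcmpequb. followed by a test of CR bit 24 (all true) for 1.  */
int bytes_identical (vector unsigned char a, vector unsigned char b)
{
  return vec_all_eq (a, b);
}

/* vec_any_ne is the "cc24f ... MOP_vcmpequbD" row: the very same
   vcmpequb., with CR bit 24 tested for 0 instead.  */
int bytes_differ (vector unsigned char a, vector unsigned char b)
{
  return vec_any_ne (a, b);
}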