Diffstat (limited to 'gcc/config')
-rw-r--r--  gcc/config/darwin-c.c                    225
-rw-r--r--  gcc/config/darwin-driver.c               989
-rw-r--r--  gcc/config/darwin-protos.h                35
-rw-r--r--  gcc/config/darwin.c                      475
-rw-r--r--  gcc/config/darwin.h                      341
-rw-r--r--  gcc/config/h8300/t-rtems                   7
-rw-r--r--  gcc/config/host-linux.c                  137
-rw-r--r--  gcc/config/host-solaris.c                 79
-rw-r--r--  gcc/config/i386/darwin.h                  68
-rw-r--r--  gcc/config/i386/i386.c                    70
-rw-r--r--  gcc/config/i386/i386.h                    14
-rw-r--r--  gcc/config/i386/t-djgpp                    2
-rw-r--r--  gcc/config/ia64/t-hpux                    43
-rw-r--r--  gcc/config/mips/t-mips                    21
-rw-r--r--  gcc/config/rs6000/altivec.h               26
-rw-r--r--  gcc/config/rs6000/builtin.ops            297
-rw-r--r--  gcc/config/rs6000/darwin-fpsave.asm       69
-rw-r--r--  gcc/config/rs6000/darwin-vecsave.asm     133
-rw-r--r--  gcc/config/rs6000/darwin-worldsave.asm   233
-rw-r--r--  gcc/config/rs6000/darwin.h               142
-rwxr-xr-x  gcc/config/rs6000/ops-to-gp              620
-rw-r--r--  gcc/config/rs6000/rs6000-c.c             123
-rw-r--r--  gcc/config/rs6000/rs6000-protos.h         13
-rw-r--r--  gcc/config/rs6000/rs6000.c               924
-rw-r--r--  gcc/config/rs6000/rs6000.h                64
-rw-r--r--  gcc/config/rs6000/rs6000.md              263
-rw-r--r--  gcc/config/rs6000/sysv4.h                  7
-rw-r--r--  gcc/config/rs6000/t-darwin                 8
-rw-r--r--  gcc/config/rs6000/t-rs6000                 1
-rw-r--r--  gcc/config/rs6000/t-rtems                 86
-rw-r--r--  gcc/config/rs6000/vec.h                 4515
-rw-r--r--  gcc/config/rs6000/vec.ops               1025
-rw-r--r--  gcc/config/t-darwin                        2
-rw-r--r--  gcc/config/t-slibgcc-darwin                3
-rw-r--r--  gcc/config/x-linux                         4
-rw-r--r--  gcc/config/x-solaris                       4
36 files changed, 10455 insertions, 613 deletions
diff --git a/gcc/config/darwin-c.c b/gcc/config/darwin-c.c
index a4c6d8bd1da..365ad2bb1c9 100644
--- a/gcc/config/darwin-c.c
+++ b/gcc/config/darwin-c.c
@@ -34,37 +34,74 @@ Boston, MA 02111-1307, USA. */
/* Pragmas. */
#define BAD(msgid) do { warning (msgid); return; } while (0)
+#define BAD2(msgid, arg) do { warning (msgid, arg); return; } while (0)
static bool using_frameworks = false;
+/* APPLE LOCAL CALL_ON_LOAD/CALL_ON_UNLOAD pragmas 20020202 turly */
+static void directive_with_named_function (const char *, void (*sec_f)(void));
+
/* Maintain a small stack of alignments. This is similar to pragma
pack's stack, but simpler. */
-static void push_field_alignment (int);
+/* APPLE LOCAL begin Macintosh alignment 2001-12-17 ff */
+static void push_field_alignment (int, int, int);
+/* APPLE LOCAL end Macintosh alignment 2001-12-17 ff */
static void pop_field_alignment (void);
static const char *find_subframework_file (const char *, const char *);
static void add_system_framework_path (char *);
static const char *find_subframework_header (cpp_reader *pfile, const char *header);
+/* APPLE LOCAL begin Macintosh alignment 2002-1-22 ff */
+/* There are four alignment modes supported on the Apple Macintosh
+ platform: power, mac68k, natural, and packed. These modes are
+ identified as follows:
+ if maximum_field_alignment != 0
+ mode = packed
+ else if TARGET_ALIGN_NATURAL
+ mode = natural
+ else if TARGET_ALIGN_MAC68K
+ mode = mac68k
+ else
+ mode = power
+ These modes are saved on the alignment stack by saving the values
+ of maximum_field_alignment, TARGET_ALIGN_MAC68K, and
+ TARGET_ALIGN_NATURAL. */
typedef struct align_stack
{
int alignment;
+ unsigned long mac68k;
+ unsigned long natural;
struct align_stack * prev;
} align_stack;
+/* APPLE LOCAL end Macintosh alignment 2002-1-22 ff */
static struct align_stack * field_align_stack = NULL;
+/* APPLE LOCAL begin Macintosh alignment 2001-12-17 ff */
static void
-push_field_alignment (int bit_alignment)
+push_field_alignment (int bit_alignment,
+ int mac68k_alignment, int natural_alignment)
{
align_stack *entry = (align_stack *) xmalloc (sizeof (align_stack));
entry->alignment = maximum_field_alignment;
+ entry->mac68k = TARGET_ALIGN_MAC68K;
+ entry->natural = TARGET_ALIGN_NATURAL;
entry->prev = field_align_stack;
field_align_stack = entry;
maximum_field_alignment = bit_alignment;
+ if (mac68k_alignment)
+ rs6000_alignment_flags |= MASK_ALIGN_MAC68K;
+ else
+ rs6000_alignment_flags &= ~MASK_ALIGN_MAC68K;
+ if (natural_alignment)
+ rs6000_alignment_flags |= MASK_ALIGN_NATURAL;
+ else
+ rs6000_alignment_flags &= ~MASK_ALIGN_NATURAL;
}
+/* APPLE LOCAL end Macintosh alignment 2001-12-17 ff */
static void
pop_field_alignment (void)
@@ -74,6 +111,16 @@ pop_field_alignment (void)
align_stack *entry = field_align_stack;
maximum_field_alignment = entry->alignment;
+/* APPLE LOCAL begin Macintosh alignment 2001-12-17 ff */
+ if (entry->mac68k)
+ rs6000_alignment_flags |= MASK_ALIGN_MAC68K;
+ else
+ rs6000_alignment_flags &= ~MASK_ALIGN_MAC68K;
+ if (entry->natural)
+ rs6000_alignment_flags |= MASK_ALIGN_NATURAL;
+ else
+ rs6000_alignment_flags &= ~MASK_ALIGN_NATURAL;
+/* APPLE LOCAL end Macintosh alignment 2001-12-17 ff */
field_align_stack = entry->prev;
free (entry);
}
@@ -111,15 +158,85 @@ darwin_pragma_options (cpp_reader *pfile ATTRIBUTE_UNUSED)
warning ("junk at end of '#pragma options'");
arg = IDENTIFIER_POINTER (t);
+/* APPLE LOCAL begin Macintosh alignment 2002-1-22 ff */
if (!strcmp (arg, "mac68k"))
- push_field_alignment (16);
+ push_field_alignment (0, 1, 0);
+ else if (!strcmp (arg, "native")) /* equivalent to power on PowerPC */
+ push_field_alignment (0, 0, 0);
+ else if (!strcmp (arg, "natural"))
+ push_field_alignment (0, 0, 1);
+ else if (!strcmp (arg, "packed"))
+ push_field_alignment (8, 0, 0);
else if (!strcmp (arg, "power"))
- push_field_alignment (0);
+ push_field_alignment (0, 0, 0);
else if (!strcmp (arg, "reset"))
pop_field_alignment ();
else
- warning ("malformed '#pragma options align={mac68k|power|reset}', ignoring");
+ warning ("malformed '#pragma options align={mac68k|power|natural|reset}', ignoring");
+/* APPLE LOCAL end Macintosh alignment 2002-1-22 ff */
+}
+
+/* APPLE LOCAL begin Macintosh alignment 2002-1-22 ff */
+/* #pragma pack ()
+ #pragma pack (N)
+
+ We have a problem handling the semantics of these directives since,
+ to play well with the Macintosh alignment directives, we want the
+ usual pack(N) form to do a push of the previous alignment state.
+ Do we want pack() to do another push or a pop? */
+
+void
+darwin_pragma_pack (pfile)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ tree x;
+ int align = -1;
+ enum cpp_ttype token;
+ enum { set, push, pop } action;
+
+ if (c_lex (&x) != CPP_OPEN_PAREN)
+ BAD ("missing '(' after '#pragma pack' - ignored");
+ token = c_lex (&x);
+ if (token == CPP_CLOSE_PAREN)
+ {
+ action = pop; /* or "set" ??? */
+ align = 0;
+ }
+ else if (token == CPP_NUMBER)
+ {
+ align = TREE_INT_CST_LOW (x);
+ action = push;
+ if (c_lex (&x) != CPP_CLOSE_PAREN)
+ BAD ("malformed '#pragma pack' - ignored");
+ }
+ else
+ BAD ("malformed '#pragma pack' - ignored");
+
+ if (c_lex (&x) != CPP_EOF)
+ warning ("junk at end of '#pragma pack'");
+
+ switch (align)
+ {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ case 16:
+ align *= BITS_PER_UNIT;
+ break;
+ default:
+ BAD2 ("alignment must be a small power of two, not %d", align);
+ }
+
+ switch (action)
+ {
+ case pop: pop_field_alignment (); break;
+ case push: push_field_alignment (align, 0, 0); break;
+ case set: break;
+ }
}
+/* APPLE LOCAL end Macintosh alignment 2002-1-22 ff */
/* #pragma unused ([var {, var}*]) */
@@ -464,3 +581,101 @@ find_subframework_header (cpp_reader *pfile, const char *header)
}
struct target_c_incpath_s target_c_incpath = C_INCPATH_INIT;
+
+/* APPLE LOCAL begin CALL_ON_LOAD/CALL_ON_UNLOAD pragmas 20020202 turly */
+extern void mod_init_section (void), mod_term_section (void);
+/* Grab the function name from the pragma line and output it to the
+ assembly output file after switching to the section selected by
+ SECTION_FUNCTION. Called by the CALL_ON_LOAD and CALL_ON_UNLOAD
+ pragma handlers below. So "#pragma CALL_ON_LOAD foo" switches to the
+ mod-init section and outputs ".long _foo". */
+
+static void directive_with_named_function (
+ const char *pragma_name,
+ void (*section_function) (void))
+{
+ tree decl;
+ int tok;
+
+ tok = c_lex (&decl);
+ if (tok == CPP_NAME && decl)
+ {
+ extern FILE *asm_out_file;
+
+ section_function ();
+ fprintf (asm_out_file, "\t.long _%s\n", IDENTIFIER_POINTER (decl));
+
+ if (c_lex (&decl) != CPP_EOF)
+ warning ("junk at end of #pragma %s <function_name>\n", pragma_name);
+ }
+ else
+ warning ("function name expected after #pragma %s\n", pragma_name);
+}
+void
+darwin_pragma_call_on_load (pfile)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ directive_with_named_function ("CALL_ON_LOAD", mod_init_section);
+}
+void
+darwin_pragma_call_on_unload (pfile)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ directive_with_named_function ("CALL_ON_UNLOAD", mod_term_section);
+}
+/* APPLE LOCAL end CALL_ON_LOAD/CALL_ON_UNLOAD pragmas 20020202 turly */
+
+/* APPLE LOCAL begin CALL_ON_MODULE_BIND deprecated 2002-4-10 ff */
+void
+darwin_pragma_call_on_module_bind (pfile)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ warning ("#pragma CALL_ON_MODULE_BIND is no longer supported, ignoring. "
+ "Use CALL_ON_LOAD instead.");
+}
+/* APPLE LOCAL end CALL_ON_MODULE_BIND deprecated 2002-4-10 ff */
+
+/* APPLE LOCAL begin temporary pragmas 2001-07-05 sts */
+/* These need to live only long enough to get their uses flushed out
+ of the system. */
+void
+darwin_pragma_cc_no_mach_text_sections (pfile)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ warning ("#pragma CC_NO_MACH_TEXT_SECTIONS is no longer supported, ignoring");
+}
+
+void
+darwin_pragma_cc_opt_off (pfile)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ warning ("#pragma CC_OPT_OFF is no longer supported, ignoring");
+}
+
+void
+darwin_pragma_cc_opt_on (pfile)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ warning ("#pragma CC_OPT_ON is no longer supported, ignoring");
+}
+
+void
+darwin_pragma_cc_opt_restore (pfile)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ warning ("#pragma CC_OPT_RESTORE is no longer supported, ignoring");
+}
+
+void
+darwin_pragma_cc_writable_strings (pfile)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ warning ("#pragma CC_WRITABLE_STRINGS is no longer supported, ignoring");
+}
+
+void
+darwin_pragma_cc_non_writable_strings (pfile)
+ cpp_reader *pfile ATTRIBUTE_UNUSED;
+{
+ warning ("#pragma CC_NON_WRITABLE_STRINGS is no longer supported, ignoring");
+}
+/* APPLE LOCAL end temporary pragmas 2001-07-05 sts */
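The handlers added to darwin-c.c above accept the alignment pragmas ("#pragma options align=..." and "#pragma pack(...)") and the load/unload hooks ("#pragma CALL_ON_LOAD/CALL_ON_UNLOAD <function>"). For reference, a minimal translation unit that would exercise them; the struct and function names below are illustrative only and are not part of the patch:

    /* Sketch only: exercises the pragmas wired up above.  */
    #pragma options align=mac68k
    struct m68k_rec { char c; int i; };   /* fields get mac68k (2-byte) alignment */
    #pragma options align=reset

    #pragma pack(2)                       /* pushes previous state, packs to 2 bytes */
    struct packed_rec { char c; int i; };
    #pragma pack()                        /* pops back to the previous state */

    static void init_hook (void) { }      /* runs at image load time */
    #pragma CALL_ON_LOAD init_hook        /* emits ".long _init_hook" in the mod-init section */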
diff --git a/gcc/config/darwin-driver.c b/gcc/config/darwin-driver.c
new file mode 100644
index 00000000000..cc74b314c5e
--- /dev/null
+++ b/gcc/config/darwin-driver.c
@@ -0,0 +1,989 @@
+/* Darwin driver program that handles -arch commands and invokes
+ appropriate compiler driver.
+ Copyright (C) 2004 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <mach-o/arch.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include "libiberty.h"
+#include "filenames.h"
+
+/* Hack!.
+ Pay the price for including darwin.h. */
+typedef int tree;
+
+/* Include darwin.h for SWITCH_TAKES_ARG and
+ WORD_SWITCH_TAKES_ARG. */
+
+#include "darwin.h"
+
+/* Include gcc.h for DEFAULT_SWITCH_TAKES_ARG and
+ DEFAULT_WORD_SWITCH_TAKES_ARG. */
+
+#include "gcc.h"
+
+/* This program's name. */
+const char *progname;
+
+/* driver prefix. */
+const char *driver_exec_prefix;
+
+/* driver prefix length. */
+int prefix_len;
+
+/* current working directory. */
+char *curr_dir;
+
+/* Use if -o flag is absent. */
+const char *final_output = "a.out";
+
+/* Variables to track the presence and/or absence of important command
+ line options. */
+int compile_only_request = 0;
+int asm_output_request = 0;
+int preprocessed_output_request = 0;
+int ima_is_used = 0;
+int dash_dynamiclib_seen = 0;
+
+/* Support at most 10 arches at a time. This is a historical limit. */
+#define MAX_ARCHES 10
+
+/* Names of user-supplied architectures. */
+const char *arches[MAX_ARCHES];
+
+/* -arch counter. */
+static int num_arches;
+
+/* Input filenames. */
+struct input_filename
+{
+ const char *name;
+ int index;
+ struct input_filename *next;
+};
+struct input_filename *in_files;
+struct input_filename *last_infile;
+
+static int num_infiles;
+
+/* User specified output file name. */
+const char *output_filename = NULL;
+
+/* Output file names used for arch specific driver invocation. These
+ are input file names for 'lipo'. */
+const char **out_files;
+static int num_outfiles;
+
+/* Architecture names used by config.guess do not match the names
+ used by NXGet... Use this hand-coded mapping to connect them. */
+struct arch_config_guess_map
+{
+ const char *arch_name;
+ const char *config_string;
+};
+
+struct arch_config_guess_map arch_config_map [] =
+{
+ {"i386", "i686"},
+ {"ppc", "powerpc"},
+ {NULL, NULL}
+};
+
+/* List of interpreted command line flags. Supply this to gcc driver. */
+const char **new_argv;
+int new_argc;
+
+/* Argument list for 'lipo'. */
+const char **lipo_argv;
+
+/* Info about the subprocesses. Need one subprocess for each arch plus an
+ additional one for 'lipo'. */
+struct command
+{
+ const char *prog;
+ const char **argv;
+ int pid;
+} commands[MAX_ARCHES+1];
+
+/* Total number of command line arguments (argc). */
+static int total_argc;
+
+static int greatest_status = 0;
+static int signal_count = 0;
+
+#ifndef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) DEFAULT_SWITCH_TAKES_ARG(CHAR)
+#endif
+
+#ifndef WORD_SWITCH_TAKES_ARG
+#define WORD_SWITCH_TAKES_ARG(STR) DEFAULT_WORD_SWITCH_TAKES_ARG (STR)
+#endif
+
+
+/* Local function prototypes. */
+static const char * get_arch_name (const char *);
+static char * get_driver_name (const char *);
+static void delete_out_files (void);
+static char * strip_path_and_suffix (const char *, const char *);
+static void initialize (void);
+static void final_cleanup (void);
+static int do_wait (int, const char *);
+static int do_lipo (int, const char *);
+static int do_compile (const char **, int);
+static int do_compile_separately (void);
+static int do_lipo_separately (void);
+static int add_arch_options (int, const char **, int);
+static int remove_arch_options (const char**, int);
+
+/* Find the arch name for the given input string. If the input name is
+ NULL then the local arch name is used. */
+
+static const char *
+get_arch_name (const char *name)
+{
+ const NXArchInfo * a_info;
+ const NXArchInfo * all_info;
+ cpu_type_t cputype;
+
+ /* Find cputype associated with the given name. */
+ if (!name)
+ a_info = NXGetLocalArchInfo ();
+ else
+ a_info = NXGetArchInfoFromName (name);
+
+ if (!a_info)
+ abort ();
+
+ cputype = a_info->cputype;
+
+ /* Now collect ALL supported arch info. */
+ all_info = NXGetAllArchInfos ();
+
+ if (!all_info)
+ abort ();
+
+ /* Find the first arch that matches cputype. */
+ while (all_info->name)
+ {
+ if (all_info->cputype == cputype)
+ break;
+ else
+ all_info++;
+ }
+
+ return all_info->name;
+}
+
+/* Find driver name based on input arch name. */
+
+static char *
+get_driver_name (const char *arch_name)
+{
+ char *driver_name;
+ const char *config_name;
+ int len;
+ int index;
+ struct arch_config_guess_map *map;
+
+ /* find config name based on arch name. */
+ config_name = NULL;
+ map = arch_config_map;
+ while (map->arch_name)
+ {
+ if (!strcmp (map->arch_name, arch_name))
+ {
+ config_name = map->config_string;
+ break;
+ }
+ else map++;
+ }
+
+ if (!config_name)
+ abort ();
+
+ len = strlen (config_name) + strlen (PDN) + prefix_len + 1;
+ driver_name = (char *) malloc (sizeof (char) * len);
+
+ if (driver_exec_prefix)
+ strcpy (driver_name, driver_exec_prefix);
+ strcat (driver_name, config_name);
+ strcat (driver_name, PDN);
+
+ return driver_name;
+}
+
+/* Delete out_files. */
+
+static void
+delete_out_files (void)
+{
+ const char *temp;
+ struct stat st;
+ int i = 0;
+
+ for (i = 0, temp = out_files[i];
+ temp && i < total_argc * MAX_ARCHES;
+ temp = out_files[++i])
+ if (stat (temp, &st) >= 0 && S_ISREG (st.st_mode))
+ unlink (temp);
+
+}
+
+/* Put fatal error message on stderr and exit. */
+
+void
+fatal (const char *msgid, ...)
+{
+ va_list ap;
+
+ va_start (ap, msgid);
+
+ fprintf (stderr, "%s: ", progname);
+ vfprintf (stderr, msgid, ap);
+ va_end (ap);
+ fprintf (stderr, "\n");
+ delete_out_files ();
+ exit (1);
+}
+
+/* Print error message and exit. */
+
+static void
+pfatal_pexecute (const char *errmsg_fmt, const char *errmsg_arg)
+{
+ if (errmsg_arg)
+ {
+ int save_errno = errno;
+
+ /* Space for trailing '\0' is in %s. */
+ char *msg = (char *) malloc (strlen (errmsg_fmt) + strlen (errmsg_arg));
+ sprintf (msg, errmsg_fmt, errmsg_arg);
+ errmsg_fmt = msg;
+
+ errno = save_errno;
+ }
+
+ fprintf (stderr,"%s: %s: %s", progname, errmsg_fmt, xstrerror (errno));
+ delete_out_files ();
+ exit (1);
+}
+
+#ifdef DEBUG
+static void
+debug_command_line (const char **debug_argv, int debug_argc)
+{
+ int i;
+
+ fprintf (stderr,"%s: debug_command_line\n", progname);
+ fprintf (stderr,"%s: arg count = %d\n", progname, debug_argc);
+
+ for (i = 0; debug_argv[i]; i++)
+ fprintf (stderr,"%s: arg [%d] %s\n", progname, i, debug_argv[i]);
+}
+#endif
+
+/* Strip the directory name from the input file name and replace the
+ file name suffix with NEW_SUFFIX. */
+
+static char *
+strip_path_and_suffix (const char *full_name, const char *new_suffix)
+{
+ char *name;
+ char *p;
+
+ if (!full_name || !new_suffix)
+ return NULL;
+
+ /* Strip path name. */
+ p = (char *)full_name + strlen (full_name);
+ while (p != full_name && !IS_DIR_SEPARATOR (p[-1]))
+ --p;
+
+ /* Now 'p' is a file name with suffix. */
+ name = (char *) malloc (strlen (p) + 1 + strlen (new_suffix));
+
+ strcpy (name, p);
+
+ p = name + strlen (name);
+ while (p != name && *p != '.')
+ --p;
+
+ /* If we did not reach the beginning of the name then a '.' was found.
+ Replace the '.' with a NUL to strip the old suffix. */
+ if (p != name)
+ *p = '\0';
+
+ strcat (name, new_suffix);
+ return name;
+}
+
+/* Initialization */
+
+static void
+initialize (void)
+{
+
+ int i;
+
+ /* Let's count how many additional arguments the driver driver will supply
+ to the compiler driver:
+
+ Each "-arch" "<blah>" is replaced by the appropriate "-mcpu=<blah>".
+ That leaves one additional arg slot available.
+
+ Note that only one -m* is supplied to each compiler driver, which
+ means the extra "-arch" "<blah>" pairs are removed from the original
+ command line. But let's not count how many additional slots that
+ makes available.
+
+ The driver driver may need to specify a temporary output file name,
+ say "-o" "foobar". That needs two extra arguments.
+
+ Sometimes the linker wants one additional "-Wl,-arch_multiple".
+
+ Sometimes the linker wants to see "-final_output" "outputname".
+
+ In the end, we need FOUR extra arguments. */
+
+ new_argv = (const char **) malloc ((total_argc + 4) * sizeof (const char *));
+ if (!new_argv)
+ abort ();
+
+ /* First slot, new_argv[0] is reserved for the driver name. */
+ new_argc = 1;
+
+ /* For each -arch, three arguments are needed.
+ For example, "-arch" "ppc" "file". Additional slots are for
+ "lipo" "-create" "-o" and "outputfilename". */
+ lipo_argv = (const char **) malloc ((total_argc * 3 + 5) * sizeof (const char *));
+ if (!lipo_argv)
+ abort ();
+
+ /* Need separate out_files for each arch, max is MAX_ARCHES.
+ Need separate out_files for each input file. */
+
+ out_files = (const char **) malloc ((total_argc * MAX_ARCHES) * sizeof (const char *));
+ if (!out_files)
+ abort ();
+
+ num_arches = 0;
+ num_infiles = 0;
+
+ in_files = NULL;
+ last_infile = NULL;
+
+ for (i = 0; i < (MAX_ARCHES + 1); i++)
+ {
+ commands[i].prog = NULL;
+ commands[i].argv = NULL;
+ commands[i].pid = 0;
+ }
+}
+
+/* Cleanup. */
+
+static void
+final_cleanup (void)
+{
+ int i;
+ struct input_filename *next;
+ delete_out_files ();
+ free (new_argv);
+ free (lipo_argv);
+ free (out_files);
+
+ for (i = 0, next = in_files;
+ i < num_infiles && next;
+ i++)
+ {
+ next = in_files->next;
+ free (in_files);
+ in_files = next;
+ }
+}
+
+/* Wait for the process pid and return appropriate code. */
+
+static int
+do_wait (int pid, const char *prog)
+{
+ int status = 0;
+ int ret = 0;
+
+ pid = pwait (pid, &status, 0);
+
+ if (WIFSIGNALED (status))
+ {
+ if (!signal_count &&
+ WEXITSTATUS (status) > greatest_status)
+ greatest_status = WEXITSTATUS (status);
+ ret = -1;
+ }
+ else if (WIFEXITED (status)
+ && WEXITSTATUS (status) >= 1)
+ {
+ if (WEXITSTATUS (status) > greatest_status)
+ greatest_status = WEXITSTATUS (status);
+ signal_count++;
+ ret = -1;
+ }
+ return ret;
+}
+
+/* Invoke 'lipo' to combine all output files. */
+
+static int
+do_lipo (int start_outfile_index, const char *out_file)
+{
+ int i, j, pid;
+ char *errmsg_fmt, *errmsg_arg;
+
+ /* Populate lipo arguments. */
+ lipo_argv[0] = "lipo";
+ lipo_argv[1] = "-create";
+ lipo_argv[2] = "-o";
+ lipo_argv[3] = out_file;
+
+ /* Already 4 lipo arguments are set. Now add all lipo inputs. */
+ j = 4;
+ for (i = 0; i < num_arches; i++)
+ {
+ lipo_argv[j++] = "-arch";
+ lipo_argv[j++] = arches[i];
+ lipo_argv[j++] = out_files[start_outfile_index + i];
+ }
+
+#ifdef DEBUG
+ debug_command_line (lipo_argv, j);
+#endif
+
+ pid = pexecute (lipo_argv[0], (char *const *)lipo_argv, progname, NULL, &errmsg_fmt,
+ &errmsg_arg, PEXECUTE_SEARCH | PEXECUTE_LAST);
+
+ if (pid == -1)
+ pfatal_pexecute (errmsg_fmt, errmsg_arg);
+
+ return do_wait (pid, lipo_argv[0]);
+}
+
+/* Invoke compiler for all architectures. */
+
+static int
+do_compile (const char **current_argv, int current_argc)
+{
+ char *errmsg_fmt, *errmsg_arg;
+ int index = 0;
+ int ret = 0;
+
+ int dash_o_index = current_argc;
+ int of_index = current_argc + 1;
+ int argc_count = current_argc + 2;
+
+ while (index < num_arches)
+ {
+ int additional_arch_options = 0;
+
+ current_argv[0] = get_driver_name (get_arch_name (arches[index]));
+
+ /* setup output file. */
+ out_files[num_outfiles] = make_temp_file (".out");
+ current_argv[dash_o_index] = "-o";
+ current_argv[of_index] = out_files [num_outfiles];
+ num_outfiles++;
+
+ /* Add arch option as the last option. Do not add any other option
+ before removing this option. */
+ additional_arch_options = add_arch_options (index, current_argv, argc_count);
+
+ commands[index].prog = current_argv[0];
+ commands[index].argv = current_argv;
+
+#ifdef DEBUG
+ debug_command_line (current_argv, of_index);
+#endif
+ commands[index].pid = pexecute (current_argv[0],
+ (char *const *)current_argv,
+ progname, NULL,
+ &errmsg_fmt,
+ &errmsg_arg,
+ PEXECUTE_SEARCH | PEXECUTE_LAST);
+
+ if (commands[index].pid == -1)
+ pfatal_pexecute (errmsg_fmt, errmsg_arg);
+
+ /* Remove the last arch option added in the current_argv list. */
+ if (additional_arch_options)
+ remove_arch_options (current_argv, argc_count);
+ index++;
+ }
+
+ index = 0;
+ while (index < num_arches)
+ {
+ ret = do_wait (commands[index].pid, commands[index].prog);
+ fflush (stdout);
+ index++;
+ }
+ return ret;
+}
+
+/* Invoke compiler for each input file separately.
+ Construct command line for each invocation with one input file. */
+
+static int
+do_compile_separately (void)
+{
+ const char **new_new_argv;
+ int i, new_new_argc;
+ struct input_filename *current_ifn;
+
+ if (num_infiles == 1 || ima_is_used)
+ abort ();
+
+ /* The total number of arguments in a separate compiler invocation is:
+ the total number of original arguments - the total number of input files
+ + one input file + "-o" + the output file. */
+ new_new_argv = (const char **) malloc ((new_argc - num_infiles + 4) * sizeof (const char *));
+ if (!new_new_argv)
+ abort ();
+
+ for (current_ifn = in_files; current_ifn && current_ifn->name;
+ current_ifn = current_ifn->next)
+ {
+ struct input_filename *ifn = in_files;
+ int go_back = 0;
+ new_new_argc = 1;
+
+ for (i = 1; i < new_argc; i++)
+ {
+
+ if (ifn && ifn->name && !strcmp (new_argv[i], ifn->name))
+ {
+ /* This argument is one of the input files. */
+
+ if (!strcmp (new_argv[i], current_ifn->name))
+ {
+ /* If it is the current input file name then add it to the new
+ list. */
+ new_new_argv[new_new_argc++] = new_argv[i];
+ }
+ /* This input file cannot appear again on the command line, so
+ next time look for the next input file. */
+ ifn = ifn->next;
+ }
+ else
+ {
+ /* This argument is not an input file name. Add it to the new
+ list. */
+ new_new_argv[new_new_argc++] = new_argv[i];
+ }
+ }
+
+ /* OK now we have only one input file and all other arguments. */
+ do_compile (new_new_argv, new_new_argc);
+ }
+}
+
+/* Invoke 'lipo' on sets of output files and create multiple FAT binaries. */
+
+static int
+do_lipo_separately (void)
+{
+ int ifn_index;
+ struct input_filename *ifn;
+ for (ifn_index = 0, ifn = in_files;
+ ifn_index < num_infiles && ifn && ifn->name;
+ ifn_index++, ifn = ifn->next)
+ do_lipo (ifn_index * num_arches,
+ strip_path_and_suffix (ifn->name, ".o"));
+}
+
+/* Replace -arch <blah> options with appropriate "-mcpu=<blah>" OR
+ "-march=<blah>". INDEX is the index in arches[] table. */
+
+static int
+add_arch_options (int index, const char **current_argv, int arch_index)
+{
+
+ int count;
+
+ /* We are adding 1 argument for selected arches. */
+ count = 1;
+
+#ifdef DEBUG
+ fprintf (stderr, "%s: add_arch_options\n", progname);
+#endif
+
+ if (!strcmp (arches[index], "ppc601"))
+ current_argv[arch_index] = "-mcpu=601";
+ else if (!strcmp (arches[index], "ppc603"))
+ current_argv[arch_index] = "-mcpu=603";
+ else if (!strcmp (arches[index], "ppc604"))
+ current_argv[arch_index] = "-mcpu=604";
+ else if (!strcmp (arches[index], "ppc604e"))
+ current_argv[arch_index] = "-mcpu=604e";
+ else if (!strcmp (arches[index], "ppc750"))
+ current_argv[arch_index] = "-mcpu=750";
+ else if (!strcmp (arches[index], "ppc7400"))
+ current_argv[arch_index] = "-mcpu=7400";
+ else if (!strcmp (arches[index], "ppc7450"))
+ current_argv[arch_index] = "-mcpu=7450";
+ else if (!strcmp (arches[index], "ppc970"))
+ current_argv[arch_index] = "-mcpu=970";
+ else if (!strcmp (arches[index], "i386"))
+ current_argv[arch_index] = "-march=i386";
+ else if (!strcmp (arches[index], "i486"))
+ current_argv[arch_index] = "-march=i486";
+ else if (!strcmp (arches[index], "i586"))
+ current_argv[arch_index] = "-march=i586";
+ else if (!strcmp (arches[index], "i686"))
+ current_argv[arch_index] = "-march=i686";
+ else if (!strcmp (arches[index], "pentium"))
+ current_argv[arch_index] = "-march=pentium";
+ else if (!strcmp (arches[index], "pentpro"))
+ current_argv[arch_index] = "-march=pentiumpro";
+ else if (!strcmp (arches[index], "pentIIm3"))
+ current_argv[arch_index] = "-march=pentium3";
+ else
+ count = 0;
+
+ return count;
+}
+
+/* Remove the last option, the arch option added by add_arch_options.
+ Return the count of arguments removed. */
+static int
+remove_arch_options (const char **current_argv, int arch_index)
+{
+#ifdef DEBUG
+ fprintf (stderr, "%s: Removing argument no %d\n", progname, arch_index);
+#endif
+
+ current_argv[arch_index] = '\0';
+
+#ifdef DEBUG
+ debug_command_line (current_argv, arch_index);
+#endif
+
+ return 1;
+}
+
+/* Main entry point. This is the gcc driver driver!
+ Interpret the -arch flags from the list of input arguments, invoke the
+ appropriate compiler driver, and 'lipo' the results together if more
+ than one -arch is supplied. */
+int
+main (int argc, const char **argv)
+{
+ size_t i;
+ int l, pid, ret, argv_0_len, prog_len;
+ char *errmsg_fmt, *errmsg_arg;
+
+ total_argc = argc;
+ argv_0_len = strlen (argv[0]);
+ prog_len = 0;
+
+ /* Get the progname, required by pexecute () and program location. */
+ progname = argv[0] + argv_0_len;
+ while (progname != argv[0] && !IS_DIR_SEPARATOR (progname[-1]))
+ {
+ prog_len++;
+ --progname;
+ }
+
+ /* Setup driver prefix. */
+ prefix_len = argv_0_len - prog_len;
+ curr_dir = (char *) malloc (sizeof (char) * (prefix_len + 1));
+ strncpy (curr_dir, argv[0], prefix_len);
+ curr_dir[prefix_len] = '\0';
+ driver_exec_prefix = (argv[0], "/usr/bin", curr_dir);
+
+#ifdef DEBUG
+ fprintf (stderr,"%s: full progname = %s\n", progname, argv[0]);
+ fprintf (stderr,"%s: progname = %s\n", progname, progname);
+ fprintf (stderr,"%s: driver_exec_prefix = %s\n", progname, driver_exec_prefix);
+#endif
+
+ initialize ();
+
+ /* Process arguments. Take appropriate actions when -arch, -c, -S, -E,
+ or -o is encountered. Find the input file names. */
+ for (i = 1; i < argc; i++)
+ {
+ if (!strcmp (argv[i], "-arch"))
+ {
+ if (i + 1 >= argc)
+ abort ();
+
+ /* arches[num_arches] = get_arch_name (argv[i+1]);*/
+ arches[num_arches] = argv[i+1];
+
+ num_arches++;
+ i++;
+ }
+ else if (!strcmp (argv[i], "-c"))
+ {
+ new_argv[new_argc++] = argv[i];
+ compile_only_request = 1;
+ }
+ else if (!strcmp (argv[i], "-S"))
+ {
+ new_argv[new_argc++] = argv[i];
+ asm_output_request = 1;
+ }
+ else if (!strcmp (argv[i], "-E"))
+ {
+ new_argv[new_argc++] = argv[i];
+ preprocessed_output_request = 1;
+ }
+ else if (!strcmp (argv[i], "-dynamiclib"))
+ {
+ new_argv[new_argc++] = argv[i];
+ dash_dynamiclib_seen = 1;
+ }
+
+ else if (!strcmp (argv[i], "-o"))
+ {
+ if (i + 1 >= argc)
+ abort ();
+
+ output_filename = argv[i+1];
+ i++;
+ }
+ else if ((! strcmp (argv[i], "-pass-exit-codes"))
+ || (! strcmp (argv[i], "-print-search-dirs"))
+ || (! strcmp (argv[i], "-print-libgcc-file-name"))
+ || (! strncmp (argv[i], "-print-file-name=", 17))
+ || (! strncmp (argv[i], "-print-prog-name=", 17))
+ || (! strcmp (argv[i], "-print-multi-lib"))
+ || (! strcmp (argv[i], "-print-multi-directory"))
+ || (! strcmp (argv[i], "-print-multi-os-directory"))
+ || (! strcmp (argv[i], "-ftarget-help"))
+ || (! strcmp (argv[i], "-fhelp"))
+ || (! strcmp (argv[i], "+e"))
+ || (! strncmp (argv[i], "-Wa,",4))
+ || (! strncmp (argv[i], "-Wp,",4))
+ || (! strncmp (argv[i], "-Wl,",4))
+ || (! strncmp (argv[i], "-l", 2))
+ || (! strncmp (argv[i], "-weak-l", 7))
+ || (! strncmp (argv[i], "-specs=", 7))
+ || (! strcmp (argv[i], "-ObjC"))
+ || (! strcmp (argv[i], "-fobjC"))
+ || (! strcmp (argv[i], "-ObjC++"))
+ || (! strcmp (argv[i], "-time"))
+ || (! strcmp (argv[i], "-###"))
+ || (! strcmp (argv[i], "-fconstant-cfstrings"))
+ || (! strcmp (argv[i], "-fno-constant-cfstrings"))
+ || (! strcmp (argv[i], "-save-temps"))
+ || (! strcmp (argv[i], "-static-libgcc"))
+ || (! strcmp (argv[i], "-shared-libgcc"))
+ || (! strcmp (argv[i], "-pipe"))
+ )
+ {
+ new_argv[new_argc++] = argv[i];
+ }
+ else if ((! strcmp (argv[i], "-Xlinker"))
+ || (! strcmp (argv[i], "-Xassembler"))
+ || (! strcmp (argv[i], "-Xpreprocessor"))
+ || (! strcmp (argv[i], "-l"))
+ || (! strcmp (argv[i], "-weak_library"))
+ || (! strcmp (argv[i], "-weak_framework"))
+ || (! strcmp (argv[i], "-specs"))
+ || (! strcmp (argv[i], "-framework"))
+ )
+ {
+ new_argv[new_argc++] = argv[i];
+ i++;
+ new_argv[new_argc++] = argv[i];
+ }
+ else if (argv[i][0] == '-' && argv[i][1] != 0)
+ {
+ const char *p = &argv[i][1];
+ int c = *p;
+
+ /* First copy this flag itself. */
+ new_argv[new_argc++] = argv[i];
+
+ /* Now copy this flag's arguments, if any, appropriately. */
+ if (c == 'x')
+ {
+ if (p[1] == 0 && i + 1 == argc)
+ fatal ("argument to `-x` is missing");
+
+ if (p[1] == 0)
+ {
+ i++;
+ new_argv[new_argc++] = argv[i];
+ }
+ }
+
+ if ((SWITCH_TAKES_ARG (c) > (p[1] != 0))
+ || WORD_SWITCH_TAKES_ARG (p))
+ {
+ int j = 0;
+ int n_args = WORD_SWITCH_TAKES_ARG (p);
+ if (n_args == 0)
+ {
+ /* Count only the option arguments in separate argv elements. */
+ n_args = SWITCH_TAKES_ARG (c) - (p[1] != 0);
+ }
+ if (i + n_args >= argc)
+ fatal ("argument to `-%s' is missing", p);
+
+
+ while ( j < n_args)
+ {
+ i++;
+ new_argv[new_argc++] = argv[i];
+ j++;
+ }
+ }
+
+ }
+ else
+ {
+ struct input_filename *ifn;
+ new_argv[new_argc++] = argv[i];
+ ifn = (struct input_filename *) malloc (sizeof (struct input_filename));
+ ifn->name = argv[i];
+ ifn->index = i;
+ ifn->next = NULL;
+ num_infiles++;
+
+ if (last_infile)
+ last_infile->next = ifn;
+ else
+ in_files = ifn;
+
+ last_infile = ifn;
+ }
+ }
+
+#if 0
+ if (num_infiles == 0)
+ fatal ("no input files");
+#endif
+
+ if (preprocessed_output_request && asm_output_request && num_infiles > 1)
+ fatal ("-E and -S are not allowed with multiple -arch flags");
+
+ /* If -arch is not present, or only one -arch <blah> is specified, invoke
+ the appropriate compiler driver directly. A FAT build is not required
+ in this case. */
+
+ if (num_arches == 0 || num_arches == 1)
+ {
+
+ /* If no -arch is specified then use the host compiler driver. */
+ if (num_arches == 0)
+ new_argv[0] = get_driver_name (get_arch_name (NULL));
+ else if (num_arches == 1)
+ {
+ /* Find the compiler driver based on -arch <foo> and add the
+ appropriate -m* argument. */
+ new_argv[0] = get_driver_name (get_arch_name (arches[0]));
+ new_argc = new_argc + add_arch_options (0, new_argv, new_argc);
+ }
+
+
+#ifdef DEBUG
+ printf ("%s: invoking single driver name = %s\n", progname, new_argv[0]);
+#endif
+
+ /* Re-insert the output file name. */
+ if (!compile_only_request || output_filename)
+ {
+ new_argv[new_argc++] = "-o";
+ new_argv[new_argc++] = (output_filename ?
+ output_filename : final_output);
+ }
+
+#ifdef DEBUG
+ debug_command_line (new_argv, new_argc);
+#endif
+
+ pid = pexecute (new_argv[0], (char *const *)new_argv, progname, NULL,
+ &errmsg_fmt, &errmsg_arg, PEXECUTE_SEARCH | PEXECUTE_LAST);
+
+ if (pid == -1)
+ pfatal_pexecute (errmsg_fmt, errmsg_arg);
+
+ ret = do_wait (pid, new_argv[0]);
+ }
+ else
+ {
+ /* Handle multiple -arch <blah>. */
+
+ /* If more than one input file is supplied but only one output filename
+ is present then IMA will be used. */
+ if (num_infiles > 1 && output_filename)
+ ima_is_used = 1;
+
+ /* Linker wants to know this in case of multiple -arch. */
+ if (!compile_only_request && !dash_dynamiclib_seen)
+ new_argv[new_argc++] = "-Wl,-arch_multiple";
+
+
+ /* If only one input file is specified or IMA is used then the expected
+ output is one FAT binary. */
+ if (num_infiles == 1 || ima_is_used)
+ {
+ const char *out_file;
+
+ /* Create output file name based on
+ input filename, if required. */
+ if (compile_only_request && !output_filename && num_infiles == 1)
+ out_file = strip_path_and_suffix (in_files->name, ".o");
+ else
+ out_file = (output_filename ? output_filename : final_output);
+
+
+ /* Linker wants to know name of output file using one extra arg. */
+ if (!compile_only_request)
+ {
+ new_argv[new_argc++] = "-final_output";
+ new_argv[new_argc++] = out_file;
+ }
+
+ /* Compile file(s) for each arch and lipo 'em together. */
+ ret = do_compile (new_argv, new_argc);
+
+ /* Make FAT binary by combining individual output files for each
+ architecture, using 'lipo'. */
+ ret = do_lipo (0, out_file);
+ }
+ else
+ {
+ /* Multiple input files are present and IMA is not used, which means
+ we need to generate multiple FAT files. */
+ ret = do_compile_separately ();
+ ret = do_lipo_separately ();
+ }
+ }
+
+ final_cleanup ();
+ free (curr_dir);
+ return greatest_status;
+}
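Taken together, darwin-driver.c maps each -arch name to a -mcpu=/-march= flag (add_arch_options), runs one compiler driver per architecture with a temporary -o file, and then runs lipo -create over the per-arch outputs. The following standalone sketch is not part of the patch: it only mirrors the mapping table and prints the shape of those commands. The driver name, temp-file names, and input file are invented here, since the real code derives them from the config name plus a driver suffix and from make_temp_file().

    /* Standalone illustration (not part of the patch): mirrors the
       -arch -> -mcpu=/-march= mapping used by add_arch_options() and
       prints the shape of the per-arch compile and final lipo steps.  */
    #include <stdio.h>
    #include <string.h>

    struct arch_flag { const char *arch; const char *flag; };

    static const struct arch_flag map[] = {
      {"ppc601", "-mcpu=601"},   {"ppc603", "-mcpu=603"},
      {"ppc604", "-mcpu=604"},   {"ppc604e", "-mcpu=604e"},
      {"ppc750", "-mcpu=750"},   {"ppc7400", "-mcpu=7400"},
      {"ppc7450", "-mcpu=7450"}, {"ppc970", "-mcpu=970"},
      {"i386", "-march=i386"},   {"i486", "-march=i486"},
      {"i586", "-march=i586"},   {"i686", "-march=i686"},
      {"pentium", "-march=pentium"}, {"pentpro", "-march=pentiumpro"},
      {"pentIIm3", "-march=pentium3"}, {NULL, NULL}
    };

    int main (void)
    {
      /* As if invoked as: gcc -arch ppc -arch i686 -c foo.c  */
      const char *arches[] = { "ppc", "i686" };
      size_t i;

      for (i = 0; i < sizeof (arches) / sizeof (arches[0]); i++)
        {
          const struct arch_flag *m;
          const char *flag = "";  /* arches not in the table (e.g. "ppc") add no -m flag */
          for (m = map; m->arch; m++)
            if (!strcmp (m->arch, arches[i]))
              flag = m->flag;
          printf ("compile for %s: <%s driver> -c foo.c -o /tmp/foo-%s.out %s\n",
                  arches[i], arches[i], arches[i], flag);
        }
      printf ("combine:        lipo -create -o foo.o "
              "-arch ppc /tmp/foo-ppc.out -arch i686 /tmp/foo-i686.out\n");
      return 0;
    }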
diff --git a/gcc/config/darwin-protos.h b/gcc/config/darwin-protos.h
index 28660339488..21426971c23 100644
--- a/gcc/config/darwin-protos.h
+++ b/gcc/config/darwin-protos.h
@@ -25,6 +25,8 @@ extern void machopic_validate_stub_or_non_lazy_ptr (const char *, int);
extern const char *machopic_function_base_name (void);
extern void machopic_output_function_base_name (FILE *);
extern const char *machopic_stub_name (const char*);
+/* APPLE LOCAL coalescing */
+extern int machopic_var_referred_to_p (const char*);
extern void machopic_picsymbol_stub_section (void);
extern void machopic_picsymbol_stub1_section (void);
@@ -75,6 +77,34 @@ extern void darwin_pragma_ignore (struct cpp_reader *);
extern void darwin_pragma_options (struct cpp_reader *);
extern void darwin_pragma_unused (struct cpp_reader *);
+/* APPLE LOCAL begin Macintosh alignment 2002-1-22 ff */
+extern void darwin_pragma_pack (struct cpp_reader *);
+/* APPLE LOCAL end Macintosh alignment 2002-1-22 ff */
+/* APPLE LOCAL begin CALL_ON_LOAD/CALL_ON_UNLOAD pragmas 20020202 turly */
+extern void darwin_pragma_call_on_load (struct cpp_reader *);
+extern void darwin_pragma_call_on_unload (struct cpp_reader *);
+/* APPLE LOCAL end CALL_ON_LOAD/CALL_ON_UNLOAD pragmas 20020202 turly */
+/* APPLE LOCAL begin CALL_ON_MODULE_BIND deprecated 2002-4-10 ff */
+extern void darwin_pragma_call_on_module_bind (struct cpp_reader *);
+/* APPLE LOCAL end CALL_ON_MODULE_BIND deprecated 2002-4-10 ff */
+/* APPLE LOCAL begin temporary pragmas 2001-07-05 sts */
+extern void darwin_pragma_cc_no_mach_text_sections (struct cpp_reader *);
+extern void darwin_pragma_cc_opt_off (struct cpp_reader *);
+extern void darwin_pragma_cc_opt_on (struct cpp_reader *);
+extern void darwin_pragma_cc_opt_restore (struct cpp_reader *);
+extern void darwin_pragma_cc_writable_strings (struct cpp_reader *);
+extern void darwin_pragma_cc_non_writable_strings (struct cpp_reader *);
+/* APPLE LOCAL end temporary pragmas 2001-07-05 sts */
+
+/* APPLE LOCAL begin coalescing */
+extern void darwin_asm_named_section (const char *, unsigned int);
+extern unsigned int darwin_section_type_flags (tree, const char *, int);
+extern int darwin_set_section_for_var_p (tree, int, int);
+/* APPLE LOCAL end coalescing */
+
+/* APPLE LOCAL double destructor */
+extern tree darwin_handle_odd_attribute (tree *, tree, tree, int, bool *);
+
extern void darwin_file_end (void);
/* Expanded by EXTRA_SECTION_FUNCTIONS into varasm.o. */
@@ -108,6 +138,8 @@ extern void objc_class_names_section (void);
extern void objc_meth_var_names_section (void);
extern void objc_meth_var_types_section (void);
extern void objc_cls_refs_section (void);
+/* APPLE LOCAL constant cfstrings */
+extern void cfstring_constant_object_section (void);
extern void machopic_lazy_symbol_ptr_section (void);
extern void machopic_nl_symbol_ptr_section (void);
extern void machopic_symbol_stub_section (void);
@@ -119,3 +151,6 @@ extern void darwin_globalize_label (FILE *, const char *);
extern void darwin_assemble_visibility (tree, int);
extern void darwin_asm_output_dwarf_delta (FILE *, int, const char *,
const char *);
+/* APPLE LOCAL C++ EH */
+extern void darwin_non_lazy_pcrel (FILE *file, rtx addr);
+
diff --git a/gcc/config/darwin.c b/gcc/config/darwin.c
index 858d150d45e..cc213a49388 100644
--- a/gcc/config/darwin.c
+++ b/gcc/config/darwin.c
@@ -46,7 +46,57 @@ Boston, MA 02111-1307, USA. */
static int machopic_data_defined_p (const char *);
static void update_non_lazy_ptrs (const char *);
static void update_stubs (const char *);
-static const char *machopic_non_lazy_ptr_name (const char*);
+const char *machopic_non_lazy_ptr_name (const char*);
+
+/* APPLE LOCAL prototypes */
+static tree machopic_non_lazy_ptr_list_entry PARAMS ((const char*, int));
+static tree machopic_stub_list_entry PARAMS ((const char *));
+
+/* APPLE LOCAL begin coalescing */
+void
+make_decl_coalesced (decl, private_extern_p)
+ tree decl;
+ int private_extern_p; /* 0 for global, 1 for private extern */
+{
+ int no_toc_p = 1; /* Don't add to table of contents */
+#if 0
+ const char *decl_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+#endif
+ static const char *const names[4] = {
+ "__TEXT,__textcoal,coalesced",
+ "__TEXT,__textcoal_nt,coalesced,no_toc",
+ "__DATA,__datacoal,coalesced",
+ "__DATA,__datacoal_nt,coalesced,no_toc",
+ };
+ const char *sec;
+ int idx;
+
+ /* Do nothing if coalescing is disabled. */
+ if (!COALESCING_ENABLED_P())
+ return;
+
+ /* We *do* need to mark these *INTERNAL* functions coalesced: though
+ these pseudo-functions themselves will never appear, their cloned
+ descendants need to be marked coalesced too. */
+#if 0
+ /* Don't touch anything with " *INTERNAL" in its name. */
+ if (strstr (decl_name, " *INTERNAL") != NULL)
+ return;
+#endif
+
+ DECL_COALESCED (decl) = 1;
+ if (private_extern_p)
+ DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
+ TREE_PUBLIC (decl) = 1;
+
+ idx = 0;
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ idx = 2;
+ sec = names[idx + (no_toc_p ? 1 : 0)];
+
+ DECL_SECTION_NAME (decl) = build_string (strlen (sec), sec);
+}
+/* APPLE LOCAL end coalescing */
int
name_needs_quotes (const char *name)
@@ -187,6 +237,14 @@ machopic_ident_defined_p (tree ident)
static int
machopic_data_defined_p (const char *name)
{
+ /* APPLE LOCAL BEGIN fix-and-continue mrs */
+#ifndef TARGET_INDIRECT_ALL_DATA
+#define TARGET_INDIRECT_ALL_DATA 0
+#endif
+ if (TARGET_INDIRECT_ALL_DATA)
+ return 0;
+ /* APPLE LOCAL END fix-and-continue mrs */
+
switch (machopic_classify_ident (get_identifier (name)))
{
case MACHOPIC_DEFINED_DATA:
@@ -264,18 +322,19 @@ static GTY(()) tree machopic_non_lazy_pointers;
either by finding it in our list of pointer names, or by generating
a new one. */
-static const char *
-machopic_non_lazy_ptr_name (const char *name)
+/* APPLE LOCAL weak import */
+/* machopic_non_lazy_ptr_list_entry separated from machopic_non_lazy_ptr_name */
+static tree
+machopic_non_lazy_ptr_list_entry (const char *name, int create_p)
{
- const char *temp_name;
- tree temp, ident = get_identifier (name);
-
+ tree temp, ident = (create_p) ? get_identifier (name) : NULL;
+
for (temp = machopic_non_lazy_pointers;
temp != NULL_TREE;
temp = TREE_CHAIN (temp))
{
if (ident == TREE_VALUE (temp))
- return IDENTIFIER_POINTER (TREE_PURPOSE (temp));
+ return temp;
}
name = darwin_strip_name_encoding (name);
@@ -287,14 +346,14 @@ machopic_non_lazy_ptr_name (const char *name)
{
if (TREE_VALUE (temp))
{
- temp_name = IDENTIFIER_POINTER (TREE_VALUE (temp));
+ const char *temp_name = IDENTIFIER_POINTER (TREE_VALUE (temp));
temp_name = darwin_strip_name_encoding (temp_name);
if (strcmp (name, temp_name) == 0)
- return IDENTIFIER_POINTER (TREE_PURPOSE (temp));
+ return temp;
}
}
- {
+ if (create_p) {
char *buffer;
int namelen = strlen (name);
int bufferlen = 0;
@@ -325,17 +384,41 @@ machopic_non_lazy_ptr_name (const char *name)
TREE_USED (machopic_non_lazy_pointers) = 0;
- return IDENTIFIER_POINTER (ptr_name);
+ return machopic_non_lazy_pointers;
}
+
+ return NULL;
}
+/* APPLE LOCAL begin coalescing */
+/* Was the variable NAME ever referenced? */
+int
+machopic_var_referred_to_p (name)
+ const char *name;
+{
+ return (machopic_non_lazy_ptr_list_entry (name, /*create:*/ 0) != NULL);
+}
+/* APPLE LOCAL end coalescing */
+
+/* APPLE LOCAL begin weak import */
+const char *
+machopic_non_lazy_ptr_name (name)
+ const char *name;
+{
+ return IDENTIFIER_POINTER (TREE_PURPOSE
+ (machopic_non_lazy_ptr_list_entry (name, /*create:*/ 1)));
+}
+/* APPLE LOCAL end weak import */
+
static GTY(()) tree machopic_stubs;
/* Return the name of the stub corresponding to the given name,
generating a new stub name if necessary. */
-const char *
-machopic_stub_name (const char *name)
+/* APPLE LOCAL begin weak import */
+/* machopic_stub_list_entry separated from machopic_stub_name */
+static tree
+machopic_stub_list_entry (const char *name)
{
tree temp, ident = get_identifier (name);
const char *tname;
@@ -345,16 +428,22 @@ machopic_stub_name (const char *name)
temp = TREE_CHAIN (temp))
{
if (ident == TREE_VALUE (temp))
- return IDENTIFIER_POINTER (TREE_PURPOSE (temp));
+ return temp;
tname = IDENTIFIER_POINTER (TREE_VALUE (temp));
if (strcmp (name, tname) == 0)
- return IDENTIFIER_POINTER (TREE_PURPOSE (temp));
+ return temp;
+
+ /* APPLE LOCAL Stripped encodings ('!T_' and '!t_') should match. */
+ if (name [0] == '!' && tname[0] == '!'
+ && strcmp (name + 4, tname + 4) == 0)
+ return temp;
+
/* A library call name might not be section-encoded yet, so try
it against a stripped name. */
if (name[0] != '!'
&& tname[0] == '!'
&& strcmp (name, tname + 4) == 0)
- return IDENTIFIER_POINTER (TREE_PURPOSE (temp));
+ return temp;
}
name = darwin_strip_name_encoding (name);
@@ -406,10 +495,18 @@ machopic_stub_name (const char *name)
machopic_stubs = tree_cons (ptr_name, ident, machopic_stubs);
TREE_USED (machopic_stubs) = 0;
- return IDENTIFIER_POINTER (ptr_name);
+ return machopic_stubs;
}
}
+const char *
+machopic_stub_name (name)
+ const char *name;
+{
+ return IDENTIFIER_POINTER (TREE_PURPOSE (machopic_stub_list_entry (name)));
+}
+/* APPLE LOCAL end weak import */
+
void
machopic_validate_stub_or_non_lazy_ptr (const char *name, int validate_stub)
{
@@ -448,6 +545,8 @@ machopic_indirect_data_reference (rtx orig, rtx reg)
if (GET_CODE (orig) == SYMBOL_REF)
{
const char *name = XSTR (orig, 0);
+ /* APPLE LOCAL weak import */
+ tree sym;
int defined = machopic_data_defined_p (name);
if (defined && MACHO_DYNAMIC_NO_PIC_P)
@@ -499,8 +598,14 @@ machopic_indirect_data_reference (rtx orig, rtx reg)
return orig;
}
+ /* APPLE LOCAL weak import */
+ sym = machopic_non_lazy_ptr_list_entry (name, /*create:*/ 1);
+ IDENTIFIER_WEAK_IMPORT (TREE_PURPOSE (sym)) =
+ IDENTIFIER_WEAK_IMPORT (TREE_VALUE (sym)) =
+ SYMBOL_REF_WEAK_IMPORT (orig);
+
ptr_ref = gen_rtx_SYMBOL_REF (Pmode,
- machopic_non_lazy_ptr_name (name));
+ IDENTIFIER_POINTER (TREE_PURPOSE (sym)));
ptr_ref = gen_rtx_MEM (Pmode, ptr_ref);
RTX_UNCHANGING_P (ptr_ref) = 1;
@@ -585,9 +690,14 @@ machopic_indirect_call_target (rtx target)
if (!machopic_name_defined_p (name))
{
- const char *stub_name = machopic_stub_name (name);
-
- XEXP (target, 0) = gen_rtx_SYMBOL_REF (mode, stub_name);
+ /* APPLE LOCAL weak import */
+ tree stub = machopic_stub_list_entry (name);
+ IDENTIFIER_WEAK_IMPORT (TREE_PURPOSE (stub)) =
+ IDENTIFIER_WEAK_IMPORT (TREE_VALUE (stub)) =
+ SYMBOL_REF_WEAK_IMPORT (XEXP (target, 0));
+
+ XEXP (target, 0) = gen_rtx_SYMBOL_REF (mode,
+ IDENTIFIER_POINTER (TREE_PURPOSE (stub)));
RTX_UNCHANGING_P (target) = 1;
}
}
@@ -855,6 +965,43 @@ machopic_legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
if (RTX_UNCHANGING_P (base) && RTX_UNCHANGING_P (orig))
RTX_UNCHANGING_P (pic_ref) = 1;
+ /* APPLE LOCAL begin gen ADD */
+#ifdef MASK_80387
+ {
+ rtx mem, other;
+
+ if (GET_CODE (orig) == MEM) {
+ mem = orig; other = base;
+ /* Swap the kids only if there is only one MEM, and it's on the right. */
+ if (GET_CODE (base) != MEM) {
+ XEXP (pic_ref, 0) = orig;
+ XEXP (pic_ref, 1) = base;
+ }
+ }
+ else if (GET_CODE (base) == MEM) {
+ mem = base; other = orig;
+ } else
+ mem = other = NULL_RTX;
+
+ /* Both kids are MEMs. */
+ if (other && GET_CODE (other) == MEM)
+ other = force_reg (GET_MODE (other), other);
+
+ /* The x86 can't post-index a MEM; emit an ADD instruction to handle this. */
+ if (mem && GET_CODE (mem) == MEM) {
+ if ( ! reload_in_progress) {
+ rtx set = gen_rtx_SET (VOIDmode, reg, pic_ref);
+ rtx clobber_cc = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
+ pic_ref = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber_cc));
+ emit_insn (pic_ref);
+ pic_ref = reg;
+ is_complex = 0;
+ }
+ }
+ }
+#endif
+ /* APPLE LOCAL end gen ADD */
+
if (reg && is_complex)
{
emit_move_insn (reg, pic_ref);
@@ -871,7 +1018,12 @@ machopic_legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
else if (GET_CODE (orig) == MEM
&& GET_CODE (XEXP (orig, 0)) == SYMBOL_REF)
{
- rtx addr = machopic_legitimize_pic_address (XEXP (orig, 0), Pmode, reg);
+ /* APPLE LOCAL use new pseudo for temp; reusing reg confuses PRE */
+ rtx tempreg = reg;
+ rtx addr;
+ if ( !no_new_pseudos )
+ tempreg = gen_reg_rtx (Pmode);
+ addr = machopic_legitimize_pic_address (XEXP (orig, 0), Pmode, tempreg);
addr = gen_rtx_MEM (GET_MODE (orig), addr);
RTX_UNCHANGING_P (addr) = RTX_UNCHANGING_P (orig);
@@ -916,6 +1068,14 @@ machopic_finish (FILE *asm_out_file)
else
stub[0] = '_', strcpy (stub + 1, stub_name);
+ /* APPLE LOCAL weak import */
+ if ( IDENTIFIER_WEAK_IMPORT (TREE_VALUE (temp)))
+ {
+ fprintf (asm_out_file, "\t.weak_reference ");
+ assemble_name (asm_out_file, sym_name);
+ fprintf (asm_out_file, "\n");
+ }
+
machopic_output_stub (asm_out_file, sym, stub);
}
@@ -929,7 +1089,11 @@ machopic_finish (FILE *asm_out_file)
if (! TREE_USED (temp))
continue;
- if (machopic_ident_defined_p (TREE_VALUE (temp)))
+ /* APPLE LOCAL fix-and-continue mrs */
+ if (! TARGET_INDIRECT_ALL_DATA
+ && (machopic_ident_defined_p (TREE_VALUE (temp))
+ /* APPLE LOCAL private extern */
+ || (sym_name[0] == '!' && sym_name[2] == 'p')))
{
data_section ();
assemble_align (GET_MODE_ALIGNMENT (Pmode));
@@ -940,6 +1104,17 @@ machopic_finish (FILE *asm_out_file)
}
else
{
+ /* APPLE LOCAL fix-and-continue mrs */
+ rtx init = const0_rtx;
+
+ /* APPLE LOCAL weak import */
+ if ( IDENTIFIER_WEAK_IMPORT (TREE_VALUE (temp)))
+ {
+ fprintf (asm_out_file, "\t.weak_reference ");
+ assemble_name (asm_out_file, sym_name);
+ fprintf (asm_out_file, "\n");
+ }
+
machopic_nl_symbol_ptr_section ();
assemble_name (asm_out_file, lazy_name);
fprintf (asm_out_file, ":\n");
@@ -948,8 +1123,14 @@ machopic_finish (FILE *asm_out_file)
assemble_name (asm_out_file, sym_name);
fprintf (asm_out_file, "\n");
- assemble_integer (const0_rtx, GET_MODE_SIZE (Pmode),
+ /* APPLE LOCAL BEGIN fix-and-continue mrs */
+ if (sym_name[3] == 's'
+ && machopic_ident_defined_p (TREE_VALUE (temp)))
+ init = gen_rtx_SYMBOL_REF (Pmode, sym_name);
+
+ assemble_integer (init, GET_MODE_SIZE (Pmode),
GET_MODE_ALIGNMENT (Pmode), 1);
+ /* APPLE LOCAL END fix-and-continue mrs */
}
}
}
@@ -1004,11 +1185,28 @@ darwin_encode_section_info (tree decl, rtx rtl, int first ATTRIBUTE_UNUSED)
if ((TREE_CODE (decl) == FUNCTION_DECL
|| TREE_CODE (decl) == VAR_DECL)
&& !DECL_EXTERNAL (decl)
+ /* APPLE LOCAL coalescing */
+#ifdef DECL_IS_COALESCED_OR_WEAK
+ && ! DECL_IS_COALESCED_OR_WEAK (decl)
+#endif
&& ((TREE_STATIC (decl)
&& (!DECL_COMMON (decl) || !TREE_PUBLIC (decl)))
|| (DECL_INITIAL (decl)
&& DECL_INITIAL (decl) != error_mark_node)))
defined = 1;
+ /* APPLE LOCAL fix OBJC codegen */
+ if (TREE_CODE (decl) == VAR_DECL)
+ {
+ sym_ref = XEXP (DECL_RTL (decl), 0);
+ orig_str = XSTR (sym_ref, 0);
+ if ( orig_str[0] == '_'
+ && orig_str[1] == 'O'
+ && orig_str[2] == 'B'
+ && orig_str[3] == 'J'
+ && orig_str[4] == 'C'
+ && orig_str[5] == '_')
+ defined = 1;
+ }
if (TREE_CODE (decl) == FUNCTION_DECL)
code = (defined ? 'T' : 't');
@@ -1041,7 +1239,15 @@ darwin_encode_section_info (tree decl, rtx rtl, int first ATTRIBUTE_UNUSED)
new_str[0] = '!';
new_str[1] = code;
new_str[2] = '_';
+ /* APPLE LOCAL private extern */
+ if (DECL_VISIBILITY (decl) == VISIBILITY_HIDDEN)
+ new_str[2] = 'p';
new_str[3] = '_';
+ /* APPLE LOCAL BEGIN fix-and-continue mrs */
+ if (TARGET_INDIRECT_ALL_DATA
+ && TREE_CODE (decl) == VAR_DECL && ! TREE_PUBLIC (decl))
+ new_str[3] = 's';
+ /* APPLE LOCAL END fix-and-continue mrs */
memcpy (new_str + 4, orig_str, len);
XSTR (sym_ref, 0) = ggc_alloc_string (new_str, new_len);
}
@@ -1161,9 +1367,24 @@ machopic_select_section (tree exp, int reloc,
&& TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE
&& TYPE_NAME (TREE_TYPE (exp)))
{
+ /* APPLE LOCAL begin constant strings */
+ extern int flag_next_runtime;
+ extern const char *constant_string_class_name;
+ /* APPLE LOCAL end constant strings */
tree name = TYPE_NAME (TREE_TYPE (exp));
if (TREE_CODE (name) == TYPE_DECL)
name = DECL_NAME (name);
+ /* APPLE LOCAL begin constant strings */
+ if (constant_string_class_name
+ && !strcmp (IDENTIFIER_POINTER (name),
+ constant_string_class_name))
+ {
+ if (flag_next_runtime)
+ objc_constant_string_object_section ();
+ else
+ objc_string_object_section ();
+ }
+ /* APPLE LOCAL end constant strings */
if (!strcmp (IDENTIFIER_POINTER (name), "NSConstantString"))
objc_constant_string_object_section ();
else if (!strcmp (IDENTIFIER_POINTER (name), "NXConstantString"))
@@ -1171,6 +1392,26 @@ machopic_select_section (tree exp, int reloc,
else
base_function ();
}
+ /* APPLE LOCAL begin constant cfstrings */
+ else if (TREE_CODE (exp) == CONSTRUCTOR
+ && TREE_TYPE (exp)
+ && TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE
+ && TREE_OPERAND (exp, 0))
+ {
+ tree name = TREE_OPERAND (exp, 0);
+ if (TREE_CODE (name) == TREE_LIST && TREE_VALUE (name)
+ && TREE_CODE (TREE_VALUE (name)) == NOP_EXPR
+ && TREE_OPERAND (TREE_VALUE (name), 0)
+ && TREE_OPERAND (TREE_OPERAND (TREE_VALUE (name), 0), 0))
+ name = TREE_OPERAND (TREE_OPERAND (TREE_VALUE (name), 0), 0);
+ if (TREE_CODE (name) == VAR_DECL
+ && !strcmp (IDENTIFIER_POINTER (DECL_NAME (name)),
+ "__CFConstantStringClassReference"))
+ cfstring_constant_object_section ();
+ else
+ base_function ();
+ }
+ /* APPLE LOCAL end constant cfstrings */
else if (TREE_CODE (exp) == VAR_DECL &&
DECL_NAME (exp) &&
TREE_CODE (DECL_NAME (exp)) == IDENTIFIER_NODE &&
@@ -1228,6 +1469,10 @@ machopic_select_section (tree exp, int reloc,
else
base_function ();
}
+ /* APPLE LOCAL begin darwin_set_section_for_var_p */
+ else if (darwin_set_section_for_var_p (exp, reloc, align))
+ ;
+ /* APPLE LOCAL end darwin_set_section_for_var_p */
else
base_function ();
}
@@ -1289,6 +1534,188 @@ darwin_globalize_label (FILE *stream, const char *name)
default_globalize_label (stream, name);
}
+/* APPLE LOCAL begin assembly "abort" directive */
+/* This can be called instead of EXIT. It will emit a '.abort' directive
+ into any existing assembly file, causing assembly to immediately abort,
+ thus preventing the assembler from spewing out numerous, irrelevant
+ error messages. */
+
+void
+abort_assembly_and_exit (status)
+ int status;
+{
+ /* If we're aborting, get the assembler to abort, too. */
+ if (status == FATAL_EXIT_CODE && asm_out_file != 0)
+ fprintf (asm_out_file, "\n.abort\n");
+
+ exit (status);
+}
+/* APPLE LOCAL end assembly "abort" directive */
+
+/* APPLE LOCAL coalescing */
+void
+darwin_asm_named_section (name, flags)
+ const char *name;
+ unsigned int flags ATTRIBUTE_UNUSED;
+{
+ fprintf (asm_out_file, ".section %s\n", name);
+}
+
+unsigned int
+darwin_section_type_flags (decl, name, reloc)
+ tree decl;
+ const char *name;
+ int reloc;
+{
+ unsigned int flags = default_section_type_flags (decl, name, reloc);
+
+ /* Weak or coalesced variables live in a writable section. */
+ if (decl != 0 && TREE_CODE (decl) != FUNCTION_DECL
+ && DECL_IS_COALESCED_OR_WEAK (decl))
+ flags |= SECTION_WRITE;
+
+ return flags;
+}
+/* APPLE LOCAL end coalescing */
+
+/* APPLE LOCAL begin double destructor turly 20020214 */
+#include "c-common.h"
+
+/* Handle __attribute__ ((apple_kext_compatibility)).
+ This only applies to darwin kexts for 295 compatibility -- it shrinks the
+ vtable for classes with this attribute (and their descendants) by not
+ outputting the new 3.0 nondeleting destructor. This means that such
+ objects CANNOT be allocated on the stack or as globals UNLESS they have
+ a completely empty `operator delete'.
+ Luckily, this fits in with the Darwin kext model.
+
+ This attribute also disables gcc3's potential overlaying of derived
+ class data members on the padding at the end of the base class. */
+
+tree
+darwin_handle_odd_attribute (node, name, args, flags, no_add_attrs)
+ tree *node;
+ tree name;
+ tree args ATTRIBUTE_UNUSED;
+ int flags ATTRIBUTE_UNUSED;
+ bool *no_add_attrs;
+{
+ if (! POSSIBLY_COMPILING_APPLE_KEXT_P ())
+ {
+ warning ("`%s' 2.95 vtable-compatability attribute applies "
+ "only when compiling a kext", IDENTIFIER_POINTER (name));
+
+ *no_add_attrs = true;
+ }
+ else if (TREE_CODE (*node) != RECORD_TYPE)
+ {
+ warning ("`%s' 2.95 vtable-compatability attribute applies "
+ "only to C++ classes", IDENTIFIER_POINTER (name));
+
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+/* APPLE LOCAL end double destructor turly 20020214 */
+
+/* APPLE LOCAL begin darwin_set_section_for_var_p turly 20020226 */
+
+/* This is specifically for any initialised static class constants
+ which may be output by the C++ front end at the end of compilation.
+ SELECT_SECTION () macro won't do because these are VAR_DECLs, not
+ STRING_CSTs or INTEGER_CSTs. And by putting 'em in appropriate
+ sections, we save space. */
+
+extern void cstring_section (void),
+ literal4_section (void), literal8_section (void);
+int
+darwin_set_section_for_var_p (exp, reloc, align)
+ tree exp;
+ int reloc;
+ int align;
+{
+ if (!reloc && TREE_CODE (exp) == VAR_DECL
+ && DECL_ALIGN (exp) == align
+ && TREE_READONLY (exp) && DECL_INITIAL (exp))
+ {
+ /* Put constant string vars in ".cstring" section. */
+
+ if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (exp))) == INTEGER_TYPE
+ && integer_onep (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (exp))))
+ && TREE_CODE (DECL_INITIAL (exp)) == STRING_CST)
+ {
+
+ /* Compare string length with actual number of characters
+ the compiler will write out (which is not necessarily
+ TREE_STRING_LENGTH, in the case of a constant array of
+ characters that is not null-terminated). Select appropriate
+ section accordingly. */
+
+ if (MIN ( TREE_STRING_LENGTH (DECL_INITIAL(exp)),
+ int_size_in_bytes (TREE_TYPE (exp)))
+ == (long) strlen (TREE_STRING_POINTER (DECL_INITIAL (exp))) + 1)
+ {
+ cstring_section ();
+ return 1;
+ }
+ else
+ {
+ const_section ();
+ return 1;
+ }
+ }
+ else
+ if (TREE_READONLY (TREE_TYPE (exp))
+ && ((TREE_CODE (TREE_TYPE (exp)) == INTEGER_TYPE
+ && TREE_CODE (DECL_INITIAL (exp)) == INTEGER_CST)
+ || (TREE_CODE (TREE_TYPE (exp)) == REAL_TYPE
+ && TREE_CODE (DECL_INITIAL (exp)) == REAL_CST))
+ && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (DECL_INITIAL (exp))))
+ == INTEGER_CST)
+ {
+ tree size = TYPE_SIZE_UNIT (TREE_TYPE (DECL_INITIAL (exp)));
+ if (TREE_INT_CST_HIGH (size) != 0)
+ return 0;
+
+ /* Put integer and float consts in the literal4|8 sections. */
+
+ if (TREE_INT_CST_LOW (size) == 4)
+ {
+ literal4_section ();
+ return 1;
+ }
+ else if (TREE_INT_CST_LOW (size) == 8)
+ {
+ literal8_section ();
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+/* APPLE LOCAL end darwin_set_section_for_var_p turly 20020226 */
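For orientation, a hedged illustration (not part of the patch) of where the routine above would place a few read-only definitions, assuming they are actually emitted, need no relocations, and keep their default alignment; the notes simply mirror the checks in darwin_set_section_for_var_p:

    /* Illustration only -- the trailing comments restate the checks above.  */
    static const char greeting[] = "hello";        /* NUL fits -> .cstring section   */
    static const char raw[5] = "hello";            /* NUL dropped -> const section   */
    static const int answer = 42;                  /* 4-byte INTEGER_CST -> literal4 */
    static const double pi = 3.14159265358979;     /* 8-byte REAL_CST -> literal8    */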
+
+/* APPLE LOCAL begin C++ EH */
+/* Generate a PC-relative reference to a Mach-O non-lazy-symbol. */
+void
+darwin_non_lazy_pcrel (FILE *file, rtx addr)
+{
+ const char *str;
+ const char *nlp_name;
+
+ if (GET_CODE (addr) != SYMBOL_REF)
+ abort ();
+
+ str = darwin_strip_name_encoding (XSTR (addr, 0));
+ nlp_name = machopic_non_lazy_ptr_name (str);
+ fputs ("\t.long\t", file);
+ ASM_OUTPUT_LABELREF (file, nlp_name);
+ fputs ("-.", file);
+}
+/* APPLE LOCAL end C++ EH */
+
/* Emit an assembler directive to set visibility for a symbol. The
only supported visibilities are VISIBILITY_DEFAULT and
VISIBILITY_HIDDEN; the latter corresponds to Darwin's "private
diff --git a/gcc/config/darwin.h b/gcc/config/darwin.h
index b5dae39e967..fa449dd304e 100644
--- a/gcc/config/darwin.h
+++ b/gcc/config/darwin.h
@@ -82,6 +82,16 @@ Boston, MA 02111-1307, USA. */
#undef DEFAULT_PCC_STRUCT_RETURN
#define DEFAULT_PCC_STRUCT_RETURN 0
+/* APPLE LOCAL framework headers */
+/* Need to look for framework headers. */
+#define FRAMEWORK_HEADERS
+
+/* APPLE LOCAL begin -Wfour-char-constants */
+/* Don't warn about MacOS-style 'APPL' four-char-constants. */
+#undef WARN_FOUR_CHAR_CONSTANTS
+#define WARN_FOUR_CHAR_CONSTANTS 0
+/* APPLE LOCAL end -Wfour-char-constants */
+
/* This table intercepts weirdo options whose names would interfere
with normal driver conventions, and either translates them into
standardly-named options, or adds a 'Z' so that they can get to
@@ -181,9 +191,16 @@ Boston, MA 02111-1307, USA. */
isn't. */
#undef CPP_SPEC
-#define CPP_SPEC "%{static:%{!dynamic:-D__STATIC__}}%{!static:-D__DYNAMIC__}\
- -D__APPLE_CC__=1"
+/* APPLE LOCAL -precomp-trustfile, -arch */
+/* APPLE LOCAL __APPLE__ setting, don't set __APPLE__ here, as we do it someplace else */
+#define CPP_SPEC "%{static:%{!dynamic:-D__STATIC__}}%{!static:-D__DYNAMIC__} \
+ %{precomp-trustfile} %{arch}"
+
+/* APPLE LOCAL cc1plus spec */
+#undef CC1PLUS_SPEC
+#define CC1PLUS_SPEC "-D__private_extern__=extern"
+/* APPLE LOCAL begin fat builds */
/* This is mostly a clone of the standard LINK_COMMAND_SPEC, plus
precomp, libtool, and fat build additions. Also we
don't specify a second %G after %L because libSystem is
@@ -193,10 +210,9 @@ Boston, MA 02111-1307, USA. */
specifying the handling of options understood by generic Unix
linkers, and for positional arguments like libraries. */
#define LINK_COMMAND_SPEC "\
-%{!fdump=*:%{!fsyntax-only:%{!precomp:%{!c:%{!M:%{!MM:%{!E:%{!S:\
+%{!foutput-dbg*:%{!fdump=*:%{!fsyntax-only:%{!precomp:%{!c:%{!M:%{!MM:%{!E:%{!S:\
%{!Zdynamiclib:%(linker)}%{Zdynamiclib:/usr/bin/libtool} \
- %{!Zdynamiclib:-arch %(darwin_arch)} \
- %{Zdynamiclib:-arch_only %(darwin_arch)} \
+ %(darwin_arch_spec) \
%l %X %{d} %{s} %{t} %{Z} \
%{!Zdynamiclib:%{A} %{e*} %{m} %{N} %{n} %{r} %{u*} %{x} %{z}} \
%{@:-o %f%u.out}%{!@:%{o*}%{!o:-o a.out}} \
@@ -204,7 +220,15 @@ Boston, MA 02111-1307, USA. */
%{L*} %(link_libgcc) %o %{fprofile-arcs|fprofile-generate:-lgcov} \
%{!nostdlib:%{!nodefaultlibs:%G %L}} \
%{!A:%{!nostdlib:%{!nostartfiles:%E}}} %{T*} %{F*} \
- %{!--help:%{!no-c++filt|c++filt:| c++filt3 }} }}}}}}}}"
+ %{!--help:%{!no-c++filt|c++filt:| c++filt3 }} }}}}}}}}}"
+
+/* Note that the linker output is always piped through c++filt (unless
+   -no-c++filt is specified) to ensure error messages have demangled
+   C++ names.  We do this even for C.  */
+/* nice idea, needs some work
+ "%{!no-c++filt|c++filt:| " STANDARD_BINDIR_PREFIX cppfilt " }}}}}}}}" */
+/* APPLE LOCAL end fat builds */
/* Please keep the random linker options in alphabetical order (modulo
'Z' and 'no' prefixes). Options that can only go to one of libtool
@@ -234,7 +258,6 @@ Boston, MA 02111-1307, USA. */
%{client_name*:%e-client_name not allowed with -dynamiclib} \
%{compatibility_version*} \
%{current_version*} \
- %{Zforce_cpusubtype_ALL:%e-force_cpusubtype_ALL not allowed with -dynamiclib} \
%{Zforce_flat_namespace:%e-force_flat_namespace not allowed with -dynamiclib} \
%{Zinstall_name*:-install_name %*} \
%{keep_private_externs:%e-keep_private_externs not allowed with -dynamiclib} \
@@ -271,7 +294,7 @@ Boston, MA 02111-1307, USA. */
%{pagezero_size*} %{segs_read_*} %{seglinkedit} %{noseglinkedit} \
%{sectalign*} %{sectobjectsymbols*} %{segcreate*} %{whyload} \
%{whatsloaded} %{dylinker_install_name*} \
- %{dylinker} %{Mach} "
+ %{dylinker} %{Mach} "
/* Machine dependent libraries. */
@@ -279,6 +302,16 @@ Boston, MA 02111-1307, USA. */
#undef LIB_SPEC
#define LIB_SPEC "%{!static:-lSystem}"
+/* APPLE LOCAL begin radar 3554191 and 3127145 */
+#undef LIBGCC_SPEC
+#undef REAL_LIBGCC_SPEC
+#define REAL_LIBGCC_SPEC \
+ "%{static:-lgcc_static} \
+ %{!static:%{static-libgcc:-lgcc -lgcc_eh} \
+ %{!static-libgcc:%{shared-libgcc:-lgcc_s%M -lgcc} \
+ %{!shared-libgcc:-lgcc -lgcc_eh}}}"
+/* APPLE LOCAL end radar 3554191 and 3127145 */
+
/* We specify crt0.o as -lcrt0.o so that ld will search the library path. */
#undef STARTFILE_SPEC
@@ -302,6 +335,29 @@ Boston, MA 02111-1307, USA. */
#define DBX_DEBUGGING_INFO 1
+/* APPLE LOCAL dwarf */
+/* Also enable Dwarf 2 as an option. */
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#define DEBUG_FRAME_SECTION "__DWARFXA,__debug_frame"
+#define DEBUG_INFO_SECTION "__DWARFXA,__debug_info"
+#define DEBUG_ABBREV_SECTION "__DWARFXA,__debug_abbrev"
+#define DEBUG_ARANGES_SECTION "__DWARFXA,__debug_aranges"
+#define DEBUG_MACINFO_SECTION "__DWARFXA,__debug_macinfo"
+#define DEBUG_LINE_SECTION "__DWARFXA,__debug_line"
+#define DEBUG_LOC_SECTION "__DWARFXA,__debug_loc"
+#define DEBUG_PUBNAMES_SECTION "__DWARFXA,__debug_pubnames"
+#define DEBUG_STR_SECTION "__DWARFXA,__debug_str"
+#define DEBUG_RANGES_SECTION "__DWARFXA,__debug_ranges"
+/* APPLE LOCAL end dwarf */
+
+/* APPLE LOCAL begin gdb only used symbols */
+/* Support option to generate stabs for only used symbols. */
+
+#define DBX_ONLY_USED_SYMBOLS
+/* APPLE LOCAL end gdb only used symbols */
+
/* When generating stabs debugging, use N_BINCL entries. */
#define DBX_USE_BINCL
@@ -326,6 +382,9 @@ do { text_section (); \
#undef INIT_SECTION_ASM_OP
#define INIT_SECTION_ASM_OP
+/* APPLE LOCAL static structors in __StaticInit section */
+#define STATIC_INIT_SECTION "__TEXT,__StaticInit,regular,pure_instructions"
+
#undef INVOKE__main
#define TARGET_ASM_CONSTRUCTOR machopic_asm_out_constructor
@@ -374,9 +433,11 @@ do { text_section (); \
&& (!DECL_COMMON (DECL) || !TREE_PUBLIC (DECL))) \
|| DECL_INITIAL (DECL)) \
machopic_define_name (xname); \
- if ((TREE_STATIC (DECL) \
- && (!DECL_COMMON (DECL) || !TREE_PUBLIC (DECL))) \
- || DECL_INITIAL (DECL)) \
+ /* APPLE LOCAL coalescing */ \
+ if (! DECL_IS_COALESCED_OR_WEAK (DECL) \
+ && ((TREE_STATIC (DECL) \
+ && (!DECL_COMMON (DECL) || !TREE_PUBLIC (DECL))) \
+ || DECL_INITIAL (DECL))) \
(* targetm.encode_section_info) (DECL, DECL_RTL (DECL), false); \
ASM_OUTPUT_LABEL (FILE, xname); \
/* Darwin doesn't support zero-size objects, so give them a \
@@ -390,7 +451,9 @@ do { text_section (); \
const char *xname = NAME; \
if (GET_CODE (XEXP (DECL_RTL (DECL), 0)) != SYMBOL_REF) \
xname = IDENTIFIER_POINTER (DECL_NAME (DECL)); \
- if ((TREE_STATIC (DECL) \
+ /* APPLE LOCAL coalescing */ \
+ if (! DECL_IS_COALESCED_OR_WEAK (DECL)) \
+ if ((TREE_STATIC (DECL) \
&& (!DECL_COMMON (DECL) || !TREE_PUBLIC (DECL))) \
|| DECL_INITIAL (DECL)) \
machopic_define_name (xname); \
@@ -428,7 +491,12 @@ do { text_section (); \
machopic_validate_stub_or_non_lazy_ptr (xname, 1); \
else if (len > 14 && !strcmp ("$non_lazy_ptr", xname + len - 13)) \
machopic_validate_stub_or_non_lazy_ptr (xname, 0); \
- fputs (&xname[1], FILE); \
+ /* APPLE LOCAL begin Objective-C++ */ \
+ if (xname[1] != '"' && name_needs_quotes (&xname[1])) \
+ fprintf (FILE, "\"%s\"", &xname[1]); \
+ else \
+ fputs (&xname[1], FILE); \
+ /* APPLE LOCAL end Objective-C++ */ \
} \
else if (xname[0] == '+' || xname[0] == '-') \
fprintf (FILE, "\"%s\"", xname); \
@@ -436,6 +504,10 @@ do { text_section (); \
fprintf (FILE, "L%s", xname); \
else if (!strncmp (xname, ".objc_class_name_", 17)) \
fprintf (FILE, "%s", xname); \
+ /* APPLE LOCAL begin Objective-C++ */ \
+ else if (xname[0] != '"' && name_needs_quotes (xname)) \
+ fprintf (FILE, "\"%s\"", xname); \
+ /* APPLE LOCAL end Objective-C++ */ \
else \
fprintf (FILE, "_%s", xname); \
} while (0)
@@ -516,6 +588,8 @@ FUNCTION (void) \
in_objc_symbols, in_objc_module_info, \
in_objc_protocol, in_objc_string_object, \
in_objc_constant_string_object, \
+ /* APPLE LOCAL constant cfstrings */ \
+ in_cfstring_constant_object, \
in_objc_image_info, \
in_objc_class_names, in_objc_meth_var_names, \
in_objc_meth_var_types, in_objc_cls_refs, \
@@ -606,6 +680,14 @@ SECTION_FUNCTION (objc_string_object_section, \
SECTION_FUNCTION (objc_constant_string_object_section, \
in_objc_constant_string_object, \
".section __OBJC, __cstring_object", 1) \
+/* APPLE LOCAL begin constant cfstrings */ \
+/* Unlike constant NSStrings, constant CFStrings do not live */\
+/* in the __OBJC segment since they may also occur in pure C */\
+/* or C++ programs. */\
+SECTION_FUNCTION (cfstring_constant_object_section, \
+ in_cfstring_constant_object, \
+ ".section __DATA, __cfstring", 0) \
+/* APPLE LOCAL end constant cfstrings */ \
/* Fix-and-Continue image marker. */ \
SECTION_FUNCTION (objc_image_info_section, \
in_objc_image_info, \
@@ -646,7 +728,8 @@ SECTION_FUNCTION (darwin_exception_section, \
".section __DATA,__gcc_except_tab", 0) \
SECTION_FUNCTION (darwin_eh_frame_section, \
in_darwin_eh_frame, \
- ".section __TEXT,__eh_frame", 0) \
+ /* APPLE LOCAL eh in data segment */ \
+ ".section " EH_FRAME_SECTION_NAME ",__eh_frame" EH_FRAME_SECTION_ATTR, 0) \
\
static void \
objc_section_init (void) \
@@ -714,6 +797,13 @@ objc_section_init (void) \
#define GLOBAL_ASM_OP ".globl "
#define TARGET_ASM_GLOBALIZE_LABEL darwin_globalize_label
+/* APPLE LOCAL begin weak definition */
+#define ASM_WEAK_DEFINITIONIZE_LABEL(FILE, NAME) \
+ do { const char* _x = (NAME); if (!!strncmp (_x, "_OBJC_", 6)) { \
+ fputs (".weak_definition ", FILE); assemble_name (FILE, _x); \
+ fputs ("\n", FILE); }} while (0)
+/* APPLE LOCAL end weak definition */
+
/* Emit an assembler directive to set visibility for a symbol. Used
to support visibility attribute and Darwin's private extern
feature. */
@@ -754,6 +844,11 @@ enum machopic_addr_class {
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING darwin_strip_name_encoding
+/* APPLE LOCAL what is this for? */
+/* Be conservative and always redo the encoding. */
+
+#define REDO_SECTION_INFO_P(DECL) (1)
+
#define GEN_BINDER_NAME_FOR_STUB(BUF,STUB,STUB_LENGTH) \
do { \
const char *const stub_ = (STUB); \
@@ -823,21 +918,235 @@ enum machopic_addr_class {
#define ASM_OUTPUT_DWARF_DELTA(FILE,SIZE,LABEL1,LABEL2) \
darwin_asm_output_dwarf_delta (FILE, SIZE, LABEL1, LABEL2)
-#define TARGET_TERMINATE_DW2_EH_FRAME_INFO false
+/* APPLE LOCAL begin coalescing */
+/* The __eh_frame section attributes: a "normal" section by default. */
+#define EH_FRAME_SECTION_ATTR /*nothing*/
+
+/* The only EH item we can't do PC-relative is the reference to
+ __gxx_personality_v0. So we cheat, since moving the __eh_frame section
+ to the DATA segment is expensive.
+ We output a 4-byte encoding - including the last 2 chars of the
+ personality function name: {0, 'g', 'v', '0', 0xff}
+ (The first zero byte coincides with the "absolute" encoding.)
+ This means we can now use DW_EH_PE_pcrel for everything. And there
+ was much rejoicing. */
+
+#define EH_FRAME_SECTION_NAME "__TEXT"
+
+#define COALESCED_UNWIND_INFO
+
+#ifdef COALESCED_UNWIND_INFO
+#undef EH_FRAME_SECTION_ATTR
+#define EH_FRAME_SECTION_ATTR ",coalesced,no_toc+strip_static_syms"
+
+
+/* Implicit or explicit template instantiations' EH info are GLOBAL
+ symbols. ("Implicit" here implies "coalesced".)
+ Note that .weak_definition is commented out until 'as' supports it. */
+
+
+#define APPLE_ASM_WEAK_DEF_FMT_STRING(LAB) \
+ (name_needs_quotes(LAB) ? ".weak_definition \"%s.eh\"\n" : ".weak_definition %s.eh\n")
-#define DARWIN_REGISTER_TARGET_PRAGMAS() \
+#define ASM_OUTPUT_COAL_UNWIND_LABEL(FILE, LAB, COAL, PUBLIC, PRIVATE_EXTERN) \
+ do { \
+ if ((COAL) || (PUBLIC) || (PRIVATE_EXTERN)) \
+ fprintf ((FILE), \
+ (name_needs_quotes(LAB) ? "%s \"%s.eh\"\n" : "%s %s.eh\n"), \
+ ((PUBLIC) ? ".globl" : ".private_extern"), \
+ (LAB)); \
+ if (COAL) \
+ fprintf ((FILE), \
+ APPLE_ASM_WEAK_DEF_FMT_STRING(LAB), \
+ (LAB)); \
+ fprintf ((FILE), \
+ (name_needs_quotes(LAB) ? "\"%s.eh\":\n" : "%s.eh:\n"), \
+ (LAB)); \
+ } while (0)
+
+#endif /* COALESCED_UNWIND_INFO */
+
+#define ASM_MAYBE_OUTPUT_ENCODED_ADDR_RTX(ASM_OUT_FILE, ENCODING, SIZE, ADDR, DONE) \
+ if (ENCODING == ASM_PREFERRED_EH_DATA_FORMAT (2, 1)) { \
+ darwin_non_lazy_pcrel (ASM_OUT_FILE, ADDR); \
+ goto DONE; \
+ }
+/* APPLE LOCAL end coalescing */
+
+/* APPLE LOCAL OS pragma hook */
+#define REGISTER_OS_PRAGMAS(PFILE) \
do { \
c_register_pragma (0, "mark", darwin_pragma_ignore); \
c_register_pragma (0, "options", darwin_pragma_options); \
c_register_pragma (0, "segment", darwin_pragma_ignore); \
c_register_pragma (0, "unused", darwin_pragma_unused); \
+ /* APPLE LOCAL begin Macintosh alignment 2002-1-22 ff */ \
+ cpp_register_pragma (PFILE, 0, "pack", darwin_pragma_pack); \
+ /* APPLE LOCAL end Macintosh alignment 2002-1-22 ff */ \
+ /* APPLE LOCAL begin CALL_ON_LOAD/CALL_ON_UNLOAD pragmas 20020202 turly */ \
+ cpp_register_pragma (PFILE, 0, "CALL_ON_LOAD", \
+ darwin_pragma_call_on_load); \
+ cpp_register_pragma (PFILE, 0, "CALL_ON_UNLOAD", \
+ darwin_pragma_call_on_unload); \
+ /* APPLE LOCAL end CALL_ON_LOAD/CALL_ON_UNLOAD pragmas 20020202 turly */ \
+ /* APPLE LOCAL begin CALL_ON_MODULE_BIND deprecated 2002-4-10 ff */ \
+ cpp_register_pragma (PFILE, 0, "CALL_ON_MODULE_BIND", darwin_pragma_call_on_module_bind); \
+ /* APPLE LOCAL end CALL_ON_MODULE_BIND deprecated 2002-4-10 ff */ \
+ /* APPLE LOCAL begin temporary pragmas 2001-07-05 sts */ \
+ cpp_register_pragma (PFILE, 0, "CC_NO_MACH_TEXT_SECTIONS", darwin_pragma_cc_no_mach_text_sections); \
+ cpp_register_pragma (PFILE, 0, "CC_OPT_OFF", darwin_pragma_cc_opt_off); \
+ cpp_register_pragma (PFILE, 0, "CC_OPT_ON", darwin_pragma_cc_opt_on); \
+ cpp_register_pragma (PFILE, 0, "CC_OPT_RESTORE", darwin_pragma_cc_opt_restore); \
+ cpp_register_pragma (PFILE, 0, "CC_WRITABLE_STRINGS", darwin_pragma_cc_writable_strings); \
+ cpp_register_pragma (PFILE, 0, "CC_NON_WRITABLE_STRINGS", darwin_pragma_cc_non_writable_strings); \
+ /* APPLE LOCAL end temporary pragmas 2001-07-05 sts */ \
} while (0)
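As a rough usage sketch -- the pragma spellings below are assumptions inferred only from the names registered above, since the darwin_pragma_* handlers live elsewhere -- source code would reach a few of these handlers like this:

    /* Hedged sketch; spellings inferred from the registrations above.  */
    #pragma pack(2)                     /* routed to darwin_pragma_pack      */
    struct on_disk_record { char tag; int value; };
    #pragma pack()

    #pragma CC_OPT_OFF                  /* darwin_pragma_cc_opt_off          */
    void keep_unoptimized (void) { }
    #pragma CC_OPT_RESTORE              /* darwin_pragma_cc_opt_restore      */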
+/* APPLE LOCAL coalescing */
+extern void make_decl_coalesced (tree, int private_extern_p);
+
+/* Coalesced symbols are private extern by default. This behavior can
+ be changed with the EXPERIMENTAL export-coalesced flag. There is
+ not (yet?) any means for coalesced symbols to be selectively exported. */
+
+#define MAKE_DECL_COALESCED(DECL) \
+ make_decl_coalesced (DECL, !flag_export_coalesced)
+
+#define COALESCE_STATIC_THUNK(DECL, PUBLIC) \
+ make_decl_coalesced (DECL, !PUBLIC)
+
+extern int flag_coalescing_enabled,
+ flag_coalesce_templates, flag_weak_coalesced_definitions;
+
+/* Coalesced symbols are private extern by default. This EXPERIMENTAL
+ flag will make them global instead. */
+extern int flag_export_coalesced;
+
+#define COALESCING_ENABLED_P() (flag_coalescing_enabled && MACHOPIC_INDIRECT)
+
+#define COALESCING_TEMPLATES_P(DECL) \
+ (COALESCING_ENABLED_P () && flag_coalesce_templates)
+
+#define TARGET_TERMINATE_DW2_EH_FRAME_INFO false
+
+#define MARK_TEMPLATE_COALESCED(DECL) \
+ do { \
+ if (COALESCING_TEMPLATES_P (DECL)) { \
+ int explicit = TREE_PUBLIC (DECL) \
+ && (DECL_EXPLICIT_INSTANTIATION (DECL) \
+ /* Or an explicitly instantiated function. */ \
+ || (TREE_CODE (DECL) == FUNCTION_DECL \
+ && DECL_INTERFACE_KNOWN (DECL) \
+ && DECL_NOT_REALLY_EXTERN (DECL)) \
+ /* Or a non-common VAR_DECL. */ \
+ || (TREE_CODE (DECL) == VAR_DECL && ! DECL_COMMON (DECL))); \
+ if (!explicit \
+ || /*it IS explicit, but*/ !flag_weak_coalesced_definitions) \
+ MAKE_DECL_COALESCED (DECL); \
+ } \
+ } while (0)
+
+#undef TARGET_ASM_NAMED_SECTION
+#define TARGET_ASM_NAMED_SECTION darwin_asm_named_section
+#undef TARGET_SECTION_TYPE_FLAGS
+#define TARGET_SECTION_TYPE_FLAGS darwin_section_type_flags
+
+#define DECL_IS_COALESCED_OR_WEAK(DECL) \
+ (DECL_COALESCED (DECL) || DECL_WEAK (DECL))
+
+extern int machopic_var_referred_to_p PARAMS ((const char*));
+#define MACHOPIC_VAR_REFERRED_TO_P(NAME) machopic_var_referred_to_p (NAME)
+/* APPLE LOCAL end coalescing */
+
+/* APPLE LOCAL insert assembly ".abort" directive on fatal error */
+#define EXIT_FROM_FATAL_DIAGNOSTIC(status) abort_assembly_and_exit (status)
+extern void abort_assembly_and_exit (int status) ATTRIBUTE_NORETURN;
+
+/* APPLE LOCAL begin Macintosh alignment 2002-2-13 ff */
+#ifdef RS6000_VECTOR_ALIGNMENT
+/* When adjusting (lowering) the alignment of fields when in the
+ mac68k alignment mode, the 128-bit alignment of vectors *MUST*
+ be preserved. */
+#define PEG_ALIGN_FOR_MAC68K(DESIRED) \
+ ((TARGET_ALTIVEC && (DESIRED) == RS6000_VECTOR_ALIGNMENT) \
+ ? RS6000_VECTOR_ALIGNMENT \
+ : MIN ((DESIRED), 16))
+#else
+#define PEG_ALIGN_FOR_MAC68K(DESIRED) MIN ((DESIRED), 16)
+#endif
+/* APPLE LOCAL end Macintosh alignment 2002-2-13 ff */
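A minimal self-contained sketch of the clamping above, assuming RS6000_VECTOR_ALIGNMENT is the usual 128 bits (values are in bits, so 16 is the mac68k two-byte ceiling):

    /* Standalone illustration of PEG_ALIGN_FOR_MAC68K -- not GCC code.  */
    #include <assert.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static int
    peg_align_for_mac68k (int desired, int target_altivec)
    {
      if (target_altivec && desired == 128)   /* vector alignment is preserved   */
        return 128;
      return MIN (desired, 16);               /* everything else pegs at 2 bytes */
    }

    int
    main (void)
    {
      assert (peg_align_for_mac68k (32, 1) == 16);    /* plain int: 4 -> 2 bytes   */
      assert (peg_align_for_mac68k (128, 1) == 128);  /* AltiVec vector: untouched */
      assert (peg_align_for_mac68k (128, 0) == 16);   /* no AltiVec: pegged too    */
      return 0;
    }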
+
+/* APPLE LOCAL begin double destructor turly 20020214 */
+/* Handle __attribute__((apple_kext_compatibility)). This shrinks the
+ vtable for all classes with this attribute (and their descendants)
+ back to 2.95 dimensions. It causes only the deleting destructor to
+ be emitted, which means that such objects CANNOT be allocated on
+ the stack or as globals. Luckily, this fits in with the Darwin
+ kext model. */
+#define SUBTARGET_ATTRIBUTE_TABLE \
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ \
+ { "apple_kext_compatibility", 0, 0, 0, 1, 0, darwin_handle_odd_attribute },
+
+/* APPLE KEXT stuff -- only applies with pure static C++ code. */
+/* NB: Can't use flag_apple_kext as it's in the C++ FE, and this macro
+ is used in the back end for the above __attribute__ handler. */
+#define POSSIBLY_COMPILING_APPLE_KEXT_P() \
+ (! MACHOPIC_INDIRECT && c_dialect_cxx())
+
+/* Need a mechanism to tell whether a C++ operator delete is empty so
+ we overload TREE_SIDE_EFFECTS here (it is unused for FUNCTION_DECLS.)
+ Fromage, c'est moi! */
+#define CHECK_TRIVIAL_FUNCTION(DECL) \
+ do { \
+ const char *_name = IDENTIFIER_POINTER (DECL_NAME (DECL)); \
+ if (POSSIBLY_COMPILING_APPLE_KEXT_P () && DECL_SAVED_TREE (DECL) \
+ && strstr (_name, "operator delete") \
+ && TREE_CODE (DECL_SAVED_TREE (DECL)) == COMPOUND_STMT \
+ && compound_body_is_empty_p ( \
+ COMPOUND_BODY (DECL_SAVED_TREE (DECL))))\
+ TREE_SIDE_EFFECTS (DECL) = 1; \
+ } while (0)
+
+/* gcc3 initialises the vptr field of all objects so that it points at the
+ first virtual function slot, NOT the base address of the vtable proper.
+ This is different from gcc2.95 which always initialised the vptr to
+ point at the base address of the vtable. The difference here is 8 bytes.
+ So, for 2.95 compatibility, we need to:
+
+ (1) subtract 8 from the vptr initialiser, and
+ (2) add 2 to every vfunc index. (2 * 4 == 8.)
+
+ This is getting ever cheesier. */
+
+#define VPTR_INITIALIZER_ADJUSTMENT 8
+#define ADJUST_VTABLE_INDEX(IDX, VTBL) \
+ do { \
+ if (POSSIBLY_COMPILING_APPLE_KEXT_P () && flag_apple_kext) \
+ (IDX) = fold (build (PLUS_EXPR, TREE_TYPE (IDX), IDX, size_int (2))); \
+ } while (0)
+/* APPLE LOCAL end double destructor turly 20020214 */
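A quick worked check of the arithmetic in the comment above -- a sketch assuming 4-byte vtable slots, as on 32-bit Darwin/PowerPC:

    /* Standalone arithmetic check -- not GCC code.  */
    #include <assert.h>

    enum { SLOT_BYTES = 4,   /* one vtable entry on 32-bit PowerPC        */
           VPTR_BIAS  = 8 }; /* matches VPTR_INITIALIZER_ADJUSTMENT above */

    int
    main (void)
    {
      /* Biasing the vptr down by 8 bytes while biasing every index up by
         2 slots cancels out, so an adjusted index reaches the same entry.  */
      int index_adjustment = VPTR_BIAS / SLOT_BYTES;  /* == 2, as in ADJUST_VTABLE_INDEX */
      assert (index_adjustment == 2);
      assert (-VPTR_BIAS + index_adjustment * SLOT_BYTES == 0);
      return 0;
    }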
+
+/* APPLE LOCAL begin zerofill turly 20020218 */
+/* This keeps uninitialized data from bloating the __DATA section when
+   -fno-common is in effect.  Radar 2863107.  */
+#define ASM_OUTPUT_ZEROFILL(FILE, NAME, SIZE, ALIGNMENT) \
+ do { \
+ fputs (".zerofill __DATA, __common, ", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ", " HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) (SIZE)); \
+ fprintf ((FILE), ", " HOST_WIDE_INT_PRINT_DEC "\n", \
+ (HOST_WIDE_INT) (ALIGNMENT)); \
+ in_section = no_section; \
+ } while (0)
+/* APPLE LOCAL end zerofill turly 20020218 */
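For reference, a hedged sketch of what the macro above would print for an ordinary uninitialised definition compiled with -fno-common; the leading underscore and the alignment operand are assumptions about what the caller passes in, not taken from the patch:

    char scratch[4096];   /* file-scope, uninitialised, -fno-common in effect */
    /* Expected output, reading the fprintf calls above literally:
         .zerofill __DATA, __common, _scratch, 4096, <ALIGNMENT>
       where <ALIGNMENT> is whatever value the compiler supplies.  */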
+
#undef ASM_APP_ON
#define ASM_APP_ON ""
#undef ASM_APP_OFF
#define ASM_APP_OFF ""
+extern const char *machopic_non_lazy_ptr_name PARAMS ((const char *));
+
void darwin_register_frameworks (int);
#define TARGET_EXTRA_INCLUDES darwin_register_frameworks
diff --git a/gcc/config/h8300/t-rtems b/gcc/config/h8300/t-rtems
deleted file mode 100644
index 104ee2366f1..00000000000
--- a/gcc/config/h8300/t-rtems
+++ /dev/null
@@ -1,7 +0,0 @@
-# Custom multilibs for RTEMS
-
-# -mn is not applicable to RTEMS (-mn implies 16bit void*)
-
-MULTILIB_OPTIONS = mh/ms mint32
-MULTILIB_DIRNAMES = h8300h h8300s int32
-MULTILIB_EXCEPTIONS = mint32
diff --git a/gcc/config/host-linux.c b/gcc/config/host-linux.c
deleted file mode 100644
index 7302d381dbe..00000000000
--- a/gcc/config/host-linux.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/* Linux host-specific hook definitions.
- Copyright (C) 2004 Free Software Foundation, Inc.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 2, or (at your
- option) any later version.
-
- GCC is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
-
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
-#include <sys/mman.h>
-#include "hosthooks.h"
-#include "hosthooks-def.h"
-
-
-/* Linux has a feature called exec-shield-randomize that perturbs the
- address of non-fixed mapped segments by a (relatively) small amount.
- The feature is intended to make it harder to attack the system with
- buffer overflow attacks, since every invocation of a program will
- have its libraries and data segments at slightly different addresses.
-
- This feature causes us problems with PCH because it makes it that
- much harder to acquire a stable location at which to map our PCH
- data file.
-
- [ The feature causes other points of non-determinism within the
- compiler as well, so we'd *really* like to be able to have the
- driver disable exec-shield-randomize for the process group, but
- that isn't possible at present. ]
-
- We're going to try several things:
-
- * Select an architecture specific address as "likely" and see
- if that's free. For our 64-bit hosts, we can easily choose
- an address in Never Never Land.
-
- * If exec-shield-randomize is disabled, then just use the
- address chosen by mmap in step one.
-
- * If exec-shield-randomize is enabled, then temporarily allocate
- 32M of memory as a buffer, then allocate PCH memory, then
- free the buffer. The theory here is that the perturbation is
- no more than 16M, and so by allocating our buffer larger than
- that we make it considerably more likely that the address will
- be free when we want to load the data back.
-*/
-
-#undef HOST_HOOKS_GT_PCH_GET_ADDRESS
-#define HOST_HOOKS_GT_PCH_GET_ADDRESS linux_gt_pch_get_address
-
-/* For various ports, try to guess a fixed spot in the vm space
- that's probably free. */
-#if defined(__alpha)
-# define TRY_EMPTY_VM_SPACE 0x10000000000
-#elif defined(__ia64)
-# define TRY_EMPTY_VM_SPACE 0x2000000100000000
-#elif defined(__x86_64)
-# define TRY_EMPTY_VM_SPACE 0x1000000000
-#elif defined(__i386)
-# define TRY_EMPTY_VM_SPACE 0x60000000
-#else
-# define TRY_EMPTY_VM_SPACE 0
-#endif
-
-/* Determine a location where we might be able to reliably allocate SIZE
- bytes. FD is the PCH file, though we should return with the file
- unmapped. */
-
-static void *
-linux_gt_pch_get_address (size_t size, int fd)
-{
- size_t buffer_size = 32 * 1024 * 1024;
- void *addr, *buffer;
- FILE *f;
- bool randomize_on;
-
- addr = mmap ((void *)TRY_EMPTY_VM_SPACE, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE, fd, 0);
-
- /* If we failed the map, that means there's *no* free space. */
- if (addr == (void *) MAP_FAILED)
- return NULL;
- /* Unmap the area before returning. */
- munmap (addr, size);
-
- /* If we got the exact area we requested, then that's great. */
- if (TRY_EMPTY_VM_SPACE && addr == (void *) TRY_EMPTY_VM_SPACE)
- return addr;
-
- /* If we didn't, then we need to look to see if randomization is on. */
- f = fopen ("/proc/sys/kernel/exec-shield-randomize", "r");
- randomize_on = false;
- if (f != NULL)
- {
- char buf[100];
- size_t c;
-
- c = fread (buf, 1, sizeof buf - 1, f);
- if (c > 0)
- {
- buf[c] = '\0';
- randomize_on = (atoi (buf) > 0);
- }
- fclose (f);
- }
-
- /* If it isn't, then accept the address that mmap selected as fine. */
- if (!randomize_on)
- return addr;
-
- /* Otherwise, we need to try again with buffer space. */
- buffer = mmap (0, buffer_size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
- addr = mmap (0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
- if (buffer != (void *) MAP_FAILED)
- munmap (buffer, buffer_size);
- if (addr == (void *) MAP_FAILED)
- return NULL;
- munmap (addr, size);
-
- return addr;
-}
-
-
-const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/gcc/config/host-solaris.c b/gcc/config/host-solaris.c
deleted file mode 100644
index 4fa7a5b1ad0..00000000000
--- a/gcc/config/host-solaris.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/* Solaris host-specific hook definitions.
- Copyright (C) 2004 Free Software Foundation, Inc.
-
- This file is part of GCC.
-
- GCC is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 2, or (at your
- option) any later version.
-
- GCC is distributed in the hope that it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- License for more details.
-
- You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the
- Free Software Foundation, 59 Temple Place - Suite 330, Boston,
- MA 02111-1307, USA. */
-
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
-#include <sys/mman.h>
-#include "hosthooks.h"
-#include "hosthooks-def.h"
-
-
-#undef HOST_HOOKS_GT_PCH_USE_ADDRESS
-#define HOST_HOOKS_GT_PCH_USE_ADDRESS sol_gt_pch_use_address
-
-/* Map SIZE bytes of FD+OFFSET at BASE. Return 1 if we succeeded at
- mapping the data at BASE, -1 if we couldn't. */
-
-static int
-sol_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
-{
- void *addr;
-
- /* We're called with size == 0 if we're not planning to load a PCH
- file at all. This allows the hook to free any static space that
- we might have allocated at link time. */
- if (size == 0)
- return -1;
-
- addr = mmap (base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
- fd, offset);
-
- /* Solaris isn't good about honoring the mmap START parameter
- without MAP_FIXED set. Before we give up, search the desired
- address space with mincore to see if the space is really free. */
- if (addr != base)
- {
- size_t page_size = getpagesize();
- char one_byte;
- size_t i;
-
- if (addr != (void *) MAP_FAILED)
- munmap (addr, size);
-
- errno = 0;
- for (i = 0; i < size; i += page_size)
- if (mincore ((char *)base + i, page_size, (void *)&one_byte) == -1
- && errno == ENOMEM)
- continue; /* The page is not mapped. */
- else
- break;
-
- if (i >= size)
- addr = mmap (base, size,
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED,
- fd, offset);
- }
-
- return addr == base ? 1 : -1;
-}
-
-
-const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/gcc/config/i386/darwin.h b/gcc/config/i386/darwin.h
index accffeee90c..18f9ec2abe2 100644
--- a/gcc/config/i386/darwin.h
+++ b/gcc/config/i386/darwin.h
@@ -23,7 +23,13 @@ Boston, MA 02111-1307, USA. */
#undef TARGET_MACHO
#define TARGET_MACHO 1
-#define TARGET_VERSION fprintf (stderr, " (i686 Darwin)");
+/* APPLE LOCAL begin default to ppro */
+/* Default to -mcpu=pentiumpro instead of i386 (radar 2730299) ilr */
+#undef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT 4
+/* APPLE LOCAL end default to ppro */
+
+#define TARGET_VERSION fprintf (stderr, " (i386 Darwin)");
#define TARGET_OS_CPP_BUILTINS() \
do \
@@ -39,17 +45,34 @@ Boston, MA 02111-1307, USA. */
the kernel or some such. */
#undef CC1_SPEC
-#define CC1_SPEC "%{!static:-fPIC}"
+/* APPLE LOCAL dynamic-no-pic */
+/* APPLE LOCAL ignore -mcpu=G4 -mcpu=G5 */
+/* When -mdynamic-no-pic finally works, remove the "xx" below. FIXME!! */
+#define CC1_SPEC "%{!static:%{!mxxdynamic-no-pic:-fPIC}} %<faltivec %<mlong-branch %<mlongcall %<mcpu=G4 %<mcpu=G5"
+
+/* APPLE LOCAL AltiVec */
+#define CPP_ALTIVEC_SPEC "%<faltivec"
-#define ASM_SPEC "-arch i686 \
+#define ASM_SPEC "%(darwin_arch_spec)\
-force_cpusubtype_ALL \
%{Zforce_cpusubtype_ALL:-force_cpusubtype_ALL} \
%{!Zforce_cpusubtype_ALL:%{mmmx:-force_cpusubtype_ALL}\
%{msse:-force_cpusubtype_ALL}\
%{msse2:-force_cpusubtype_ALL}}"
+#define DARWIN_ARCH_SPEC \
+"%{march=i386: %{!Zdynamiclib:-arch i386} %{Zdynamiclib:-arch_only i386}} \
+ %{march=i486: %{!Zdynamiclib:-arch i486} %{Zdynamiclib:-arch_only i486}} \
+ %{march=i586: %{!Zdynamiclib:-arch i586} %{Zdynamiclib:-arch_only i586}} \
+ %{march=pentium: %{!Zdynamiclib:-arch pentium} %{Zdynamiclib:-arch_only pentium}} \
+ %{march=pentiumpro: %{!Zdynamiclib:-arch pentpro} %{Zdynamiclib:-arch_only pentpro}} \
+ %{march=i686: %{!Zdynamiclib:-arch i686} %{Zdynamiclib:-arch_only i686}} \
+ %{march=pentium3: %{!Zdynamiclib:-arch pentIIm3} %{Zdynamiclib:-arch_only pentIIm3}} \
+ %{!mcpu*:%{!march*:%{!Zdynamiclib:-arch i686} %{Zdynamiclib:-arch_only i686}}} "
+
#undef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS \
+ { "darwin_arch_spec", DARWIN_ARCH_SPEC }, \
{ "darwin_arch", "i686" },
/* Use the following macro for any Darwin/x86-specific command-line option
@@ -104,7 +127,12 @@ Boston, MA 02111-1307, USA. */
#define ASM_OUTPUT_ALIGN(FILE,LOG) \
do { if ((LOG) != 0) \
{ \
- if (in_text_section ()) \
+ /* APPLE LOCAL coalescing */ \
+ if (in_text_section () \
+ || in_unlikely_text_section () \
+ || darwin_named_section_is ("__TEXT,__textcoal,coalesced") \
+ || darwin_named_section_is ("__TEXT,__textcoal_nt,coalesced,no_toc") \
+ || darwin_named_section_is (STATIC_INIT_SECTION)) \
fprintf (FILE, "\t%s %d,0x90\n", ALIGN_ASM_OP, (LOG)); \
else \
fprintf (FILE, "\t%s %d\n", ALIGN_ASM_OP, (LOG)); \
@@ -127,6 +155,27 @@ Boston, MA 02111-1307, USA. */
assemble_name ((FILE), (NAME)), \
fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED"\n", (ROUNDED)))
+
+/* APPLE LOCAL begin Macintosh alignment 2002-2-19 ff */
+#define MASK_ALIGN_NATURAL 0x40000000
+#define TARGET_ALIGN_NATURAL (target_flags & MASK_ALIGN_NATURAL)
+#define rs6000_alignment_flags target_flags
+#define MASK_ALIGN_MAC68K 0x20000000
+#define TARGET_ALIGN_MAC68K (target_flags & MASK_ALIGN_MAC68K)
+
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+ {"align-mac68k", MASK_ALIGN_MAC68K, \
+ N_("Align structs and unions according to mac68k rules")}, \
+ {"align-power", - (MASK_ALIGN_MAC68K | MASK_ALIGN_NATURAL), \
+ N_("Align structs and unions according to PowerPC rules")}, \
+ {"align-natural", MASK_ALIGN_NATURAL, \
+ N_("Align structs and unions according to natural rules")}, \
+ {"dynamic-no-pic", MASK_MACHO_DYNAMIC_NO_PIC, \
+ N_("Generate code suitable for executables (NOT shared libs)")},\
+ {"no-dynamic-no-pic", -MASK_MACHO_DYNAMIC_NO_PIC, ""},
+/* APPLE LOCAL end Macintosh alignment 2002-2-19 ff */
+
/* Darwin profiling -- call mcount. */
#undef FUNCTION_PROFILER
#define FUNCTION_PROFILER(FILE, LABELNO) \
@@ -139,3 +188,14 @@ Boston, MA 02111-1307, USA. */
} \
else fprintf (FILE, "\tcall mcount\n"); \
} while (0)
+
+/* APPLE LOCAL SSE stack alignment */
+#define BASIC_STACK_BOUNDARY (128)
+
+#undef SUBTARGET_OVERRIDE_OPTIONS
+/* Force Darwin/x86 to default to pentiumpro for -march and pentium4 for -mtune.  */
+#define SUBTARGET_OVERRIDE_OPTIONS \
+ do { \
+ if (!ix86_arch_string) ix86_arch_string = "pentiumpro"; \
+ if (!ix86_tune_string) ix86_tune_string = "pentium4"; \
+ } while (0)
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 58e26333a5c..582102c18ea 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -459,6 +459,34 @@ struct processor_costs pentium4_cost = {
const struct processor_costs *ix86_cost = &pentium_cost;
+/* APPLE LOCAL begin Altivec */
+/* vector types */
+static GTY(()) tree unsigned_V16QI_type_node;
+static GTY(()) tree unsigned_V4SI_type_node;
+static GTY(()) tree unsigned_V8QI_type_node;
+static GTY(()) tree unsigned_V8HI_type_node;
+static GTY(()) tree unsigned_V4HI_type_node;
+static GTY(()) tree unsigned_V2HI_type_node;
+static GTY(()) tree unsigned_V2SI_type_node;
+static GTY(()) tree unsigned_V2DI_type_node;
+static GTY(()) tree unsigned_V1DI_type_node;
+
+static GTY(()) tree V16QI_type_node;
+static GTY(()) tree V4SF_type_node;
+static GTY(()) tree V4SI_type_node;
+static GTY(()) tree V8QI_type_node;
+static GTY(()) tree V8HI_type_node;
+static GTY(()) tree V4HI_type_node;
+static GTY(()) tree V2HI_type_node;
+static GTY(()) tree V2SI_type_node;
+static GTY(()) tree V2SF_type_node;
+static GTY(()) tree V2DI_type_node;
+static GTY(()) tree V2DF_type_node;
+static GTY(()) tree V16SF_type_node;
+static GTY(()) tree V1DI_type_node;
+static GTY(()) tree V4DF_type_node;
+/* APPLE LOCAL end Altivec */
+
/* Processor feature/optimization bitmasks. */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
@@ -1004,6 +1032,12 @@ static void init_ext_80387_constants (void);
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost
+/* APPLE LOCAL begin SSE stack alignment */
+#ifndef BASIC_STACK_BOUNDARY
+#define BASIC_STACK_BOUNDARY (32)
+#endif
+/* APPLE LOCAL end SSE stack alignment */
+
#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
@@ -1345,9 +1379,11 @@ override_options (void)
The default of 128 bits is for Pentium III's SSE __m128, but we
don't want additional code to keep the stack aligned when
optimizing for code size. */
+ /* APPLE LOCAL begin SSE stack alignment */
ix86_preferred_stack_boundary = (optimize_size
- ? TARGET_64BIT ? 128 : 32
+ ? TARGET_64BIT ? 128 : BASIC_STACK_BOUNDARY
: 128);
+ /* APPLE LOCAL end SSE stack alignment */
if (ix86_preferred_stack_boundary_string)
{
i = atoi (ix86_preferred_stack_boundary_string);
@@ -1485,11 +1521,24 @@ override_options (void)
internal_label_prefix_len = p - internal_label_prefix;
*p = '\0';
}
+
+ /* APPLE LOCAL begin dynamic-no-pic */
+ if (flag_pic == 1)
+ {
+ /* Darwin doesn't support -fpic. */
+ warning ("-fpic is not supported; -fPIC assumed");
+ flag_pic = 2;
+ }
+ /* APPLE LOCAL end dynamic-no-pic */
}
void
optimization_options (int level, int size ATTRIBUTE_UNUSED)
{
+ /* APPLE LOCAL disable strict aliasing; breaks too much existing code. */
+#if TARGET_MACHO
+ flag_strict_aliasing = 0;
+#endif
/* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
make the problem with not enough registers even worse. */
#ifdef INSN_SCHEDULING
@@ -1529,6 +1578,11 @@ const struct attribute_spec ix86_attribute_table[] =
#endif
{ "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
{ "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
+ /* APPLE LOCAL begin double destructor */
+#ifdef SUBTARGET_ATTRIBUTE_TABLE
+ SUBTARGET_ATTRIBUTE_TABLE
+#endif
+ /* APPLE LOCAL end double destructor */
{ NULL, 0, 0, false, false, false, NULL }
};
@@ -2959,7 +3013,19 @@ ix86_build_builtin_va_list (void)
return build_array_type (record, build_index_type (size_zero_node));
}
-/* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
+/* Perform any actions needed for a function that is receiving a
+ variable number of arguments.
+
+ CUM is as above.
+
+ MODE and TYPE are the mode and type of the current parameter.
+
+ PRETEND_SIZE is a variable that should be set to the amount of stack
+ that must be pushed by the prolog to pretend that our caller pushed
+ it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed. */
static void
ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index b90f590b98a..a658ec7fb1b 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -34,6 +34,9 @@ Boston, MA 02111-1307, USA. */
ADDR_BEG, ADDR_END, PRINT_IREG, PRINT_SCALE, PRINT_B_I_S, and many
that start with ASM_ or end in ASM_OP. */
+/* APPLE LOCAL fat builds */
+#define DEFAULT_TARGET_ARCH "i386"
+
/* Define the specific costs for a given cpu */
struct processor_costs {
@@ -207,6 +210,11 @@ extern int target_flags;
#endif
#endif
+/* APPLE LOCAL begin hot/cold partitioning */
+#define HAS_LONG_COND_BRANCH 1
+#define HAS_LONG_UNCOND_BRANCH 1
+/* APPLE LOCAL end hot/cold partitioning */
+
/* Avoid adding %gs:0 in TLS references; use %gs:address directly. */
#define TARGET_TLS_DIRECT_SEG_REFS (target_flags & MASK_TLS_DIRECT_SEG_REFS)
@@ -774,7 +782,8 @@ extern int x86_prefetch_sse;
#define PARM_BOUNDARY BITS_PER_WORD
/* Boundary (in *bits*) on which stack pointer should be aligned. */
-#define STACK_BOUNDARY BITS_PER_WORD
+/* APPLE LOCAL 3232990 - compiler should obey -mpreferred-stack-boundary */
+#define STACK_BOUNDARY ((ix86_preferred_stack_boundary > 128) ? 128 : ix86_preferred_stack_boundary)
/* Boundary (in *bits*) on which the stack pointer prefers to be
aligned; the compiler cannot rely on having this alignment. */
@@ -825,7 +834,8 @@ extern int x86_prefetch_sse;
#define BIGGEST_FIELD_ALIGNMENT 32
#endif
#else
-#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
+/* APPLE LOCAL Macintosh alignment */
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED, FIRST_FIELD_P) \
x86_field_alignment (FIELD, COMPUTED)
#endif
diff --git a/gcc/config/i386/t-djgpp b/gcc/config/i386/t-djgpp
deleted file mode 100644
index 7b54b7ba7aa..00000000000
--- a/gcc/config/i386/t-djgpp
+++ /dev/null
@@ -1,2 +0,0 @@
-# Location of DJGPP's header directory.
-NATIVE_SYSTEM_HEADER_DIR=$(DJDIR)/include
diff --git a/gcc/config/ia64/t-hpux b/gcc/config/ia64/t-hpux
deleted file mode 100644
index 597c2acbe2a..00000000000
--- a/gcc/config/ia64/t-hpux
+++ /dev/null
@@ -1,43 +0,0 @@
-# We need multilib support for HPUX's ILP32 & LP64 modes.
-
-LIBGCC = stmp-multilib
-INSTALL_LIBGCC = install-multilib
-
-MULTILIB_OPTIONS = milp32/mlp64
-MULTILIB_DIRNAMES = hpux32 hpux64
-MULTILIB_MATCHES =
-
-# Support routines for HP-UX 128 bit floats.
-
-LIB2FUNCS_EXTRA=quadlib.c
-
-quadlib.c: $(srcdir)/config/ia64/quadlib.c
- cat $(srcdir)/config/ia64/quadlib.c > quadlib.c
-
-# We get an undefined main when building a cross compiler because our
-# linkspec has "-u main" and we want that for linking but it makes
-# LIBGCC1_TEST fail because it uses -nostdlib -nostartup.
-
-LIBGCC1_TEST =
-
-# We do not want to include the EH stuff that linux uses, we want to use
-# the HP-UX libunwind library.
-
-LIB2ADDEH =
-
-SHLIB_EXT = .so
-# Must include -lunwind in the link, so that libgcc_s.so has the necessary
-# DT_NEEDED entry for libunwind.
-SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
- -Wl,+h,@shlib_base_name@.so.0 \
- -o @shlib_base_name@.so @multilib_flags@ @shlib_objs@ -lunwind -lc && \
- rm -f @shlib_base_name@.so.0 && \
- $(LN_S) @shlib_base_name@.so @shlib_base_name@.so.0
-# $(slibdir) double quoted to protect it from expansion while building
-# libgcc.mk. We want this delayed until actual install time.
-SHLIB_INSTALL = $(INSTALL_DATA) @shlib_base_name@.so $$(DESTDIR)$$(slibdir)/@shlib_base_name@.so.0; \
- rm -f $$(DESTDIR)$$(slibdir)/@shlib_base_name@.so; \
- $(LN_S) @shlib_base_name@.so.0 $$(DESTDIR)$$(slibdir)/@shlib_base_name@.so; \
- chmod +x $$(DESTDIR)$$(slibdir)/@shlib_base_name@.so
-
-SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
diff --git a/gcc/config/mips/t-mips b/gcc/config/mips/t-mips
deleted file mode 100644
index 497f4fb20cb..00000000000
--- a/gcc/config/mips/t-mips
+++ /dev/null
@@ -1,21 +0,0 @@
-# fp-bit and dp-bit are really part of libgcc1, but this will cause
-# them to be built correctly, so... [taken from t-sparclite]
-# We want fine grained libraries, so use the new code to build the
-# floating point emulation libraries.
-FPBIT = fp-bit.c
-DPBIT = dp-bit.c
-
-dp-bit.c: $(srcdir)/config/fp-bit.c
- echo '#ifdef __MIPSEL__' > dp-bit.c
- echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
- echo '#endif' >> dp-bit.c
- echo '#define QUIET_NAN_NEGATED' >> dp-bit.c
- cat $(srcdir)/config/fp-bit.c >> dp-bit.c
-
-fp-bit.c: $(srcdir)/config/fp-bit.c
- echo '#define FLOAT' > fp-bit.c
- echo '#ifdef __MIPSEL__' >> fp-bit.c
- echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
- echo '#endif' >> fp-bit.c
- echo '#define QUIET_NAN_NEGATED' >> fp-bit.c
- cat $(srcdir)/config/fp-bit.c >> fp-bit.c
diff --git a/gcc/config/rs6000/altivec.h b/gcc/config/rs6000/altivec.h
index 2ae567ef3d4..07278e95789 100644
--- a/gcc/config/rs6000/altivec.h
+++ b/gcc/config/rs6000/altivec.h
@@ -36,10 +36,11 @@
#error Use the "-maltivec" flag to enable PowerPC AltiVec support
#endif
-/* You are allowed to undef these for C++ compatibility. */
-#define vector __vector
-#define pixel __pixel
-#define bool __bool
+/* APPLE LOCAL begin AltiVec */
+/* The keywords 'vector', 'pixel' and 'bool' are now implemented as
+ context-sensitive macros, and hence should not be defined
+ unconditionally. */
+/* APPLE LOCAL end AltiVec */
/* Condition register codes for AltiVec predicates. */
@@ -117,7 +118,7 @@ inline void vec_dst (const vector unsigned int *, int, const int) __attribute__
inline void vec_dst (const vector signed int *, int, const int) __attribute__ ((always_inline));
inline void vec_dst (const vector bool int *, int, const int) __attribute__ ((always_inline));
inline void vec_dst (const vector float *, int, const int) __attribute__ ((always_inline));
-inline void vec_dst (const int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dst (const unsigned char *, int, const int) __attribute__ ((always_inline));
inline void vec_dst (const signed char *, int, const int) __attribute__ ((always_inline));
inline void vec_dst (const unsigned short *, int, const int) __attribute__ ((always_inline));
inline void vec_dst (const short *, int, const int) __attribute__ ((always_inline));
@@ -138,7 +139,7 @@ inline void vec_dstst (const vector unsigned int *, int, const int) __attribute_
inline void vec_dstst (const vector signed int *, int, const int) __attribute__ ((always_inline));
inline void vec_dstst (const vector bool int *, int, const int) __attribute__ ((always_inline));
inline void vec_dstst (const vector float *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstst (const int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstst (const unsigned char *, int, const int) __attribute__ ((always_inline));
inline void vec_dstst (const signed char *, int, const int) __attribute__ ((always_inline));
inline void vec_dstst (const unsigned short *, int, const int) __attribute__ ((always_inline));
inline void vec_dstst (const short *, int, const int) __attribute__ ((always_inline));
@@ -159,7 +160,7 @@ inline void vec_dststt (const vector unsigned int *, int, const int) __attribute
inline void vec_dststt (const vector signed int *, int, const int) __attribute__ ((always_inline));
inline void vec_dststt (const vector bool int *, int, const int) __attribute__ ((always_inline));
inline void vec_dststt (const vector float *, int, const int) __attribute__ ((always_inline));
-inline void vec_dststt (const int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dststt (const unsigned char *, int, const int) __attribute__ ((always_inline));
inline void vec_dststt (const signed char *, int, const int) __attribute__ ((always_inline));
inline void vec_dststt (const unsigned short *, int, const int) __attribute__ ((always_inline));
inline void vec_dststt (const short *, int, const int) __attribute__ ((always_inline));
@@ -180,7 +181,7 @@ inline void vec_dstt (const vector unsigned int *, int, const int) __attribute__
inline void vec_dstt (const vector signed int *, int, const int) __attribute__ ((always_inline));
inline void vec_dstt (const vector bool int *, int, const int) __attribute__ ((always_inline));
inline void vec_dstt (const vector float *, int, const int) __attribute__ ((always_inline));
-inline void vec_dstt (const int *, int, const int) __attribute__ ((always_inline));
+inline void vec_dstt (const unsigned char *, int, const int) __attribute__ ((always_inline));
inline void vec_dstt (const signed char *, int, const int) __attribute__ ((always_inline));
inline void vec_dstt (const unsigned short *, int, const int) __attribute__ ((always_inline));
inline void vec_dstt (const short *, int, const int) __attribute__ ((always_inline));
@@ -195,15 +196,20 @@ inline vector signed int vec_sld (vector signed int, vector signed int, const in
inline vector unsigned int vec_sld (vector unsigned int, vector unsigned int, const int) __attribute__ ((always_inline));
inline vector signed short vec_sld (vector signed short, vector signed short, const int) __attribute__ ((always_inline));
inline vector unsigned short vec_sld (vector unsigned short, vector unsigned short, const int) __attribute__ ((always_inline));
+inline vector pixel vec_sld (vector pixel, vector pixel, const int) __attribute__ ((always_inline));
inline vector signed char vec_sld (vector signed char, vector signed char, const int) __attribute__ ((always_inline));
inline vector unsigned char vec_sld (vector unsigned char, vector unsigned char, const int) __attribute__ ((always_inline));
inline vector signed char vec_splat (vector signed char, const int) __attribute__ ((always_inline));
inline vector unsigned char vec_splat (vector unsigned char, const int) __attribute__ ((always_inline));
+inline vector bool char vec_splat (vector bool char, const int) __attribute__ ((always_inline));
inline vector signed short vec_splat (vector signed short, const int) __attribute__ ((always_inline));
inline vector unsigned short vec_splat (vector unsigned short, const int) __attribute__ ((always_inline));
+inline vector bool short vec_splat (vector bool short, const int) __attribute__ ((always_inline));
+inline vector pixel vec_splat (vector pixel, const int) __attribute__ ((always_inline));
inline vector float vec_splat (vector float, const int) __attribute__ ((always_inline));
inline vector signed int vec_splat (vector signed int, const int) __attribute__ ((always_inline));
inline vector unsigned int vec_splat (vector unsigned int, const int) __attribute__ ((always_inline));
+inline vector bool int vec_splat (vector bool int, const int) __attribute__ ((always_inline));
inline vector signed char vec_splat_s8 (const int) __attribute__ ((always_inline));
inline vector signed short vec_splat_s16 (const int) __attribute__ ((always_inline));
inline vector signed int vec_splat_s32 (const int) __attribute__ ((always_inline));
@@ -8897,7 +8903,7 @@ __ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
((vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
__ch (__bin_args_eq (vector bool int, (a1), vector bool int, (a2)), \
((vector bool int) __builtin_altivec_vmrghw ((vector signed int) (a1), (vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_mergeh")))))))))))
+ __builtin_altivec_compiletime_error ("vec_mergeh"))))))))))))
#define vec_vmrghw(a1, a2) \
__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
@@ -8945,7 +8951,7 @@ __ch (__bin_args_eq (vector unsigned int, (a1), vector unsigned int, (a2)), \
((vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
__ch (__bin_args_eq (vector bool int, (a1), vector bool int, (a2)), \
((vector bool int) __builtin_altivec_vmrglw ((vector signed int) (a1), (vector signed int) (a2))), \
- __builtin_altivec_compiletime_error ("vec_mergel"))))))))
+ __builtin_altivec_compiletime_error ("vec_mergel"))))))))))))
#define vec_vmrglw(a1, a2) \
__ch (__bin_args_eq (vector float, (a1), vector float, (a2)), \
diff --git a/gcc/config/rs6000/builtin.ops b/gcc/config/rs6000/builtin.ops
new file mode 100644
index 00000000000..a28e35654fc
--- /dev/null
+++ b/gcc/config/rs6000/builtin.ops
@@ -0,0 +1,297 @@
+# APPLE LOCAL file AltiVec
+# ops-to-gp -gcc vec.ops builtin.ops
+# @ betype betype-code type-spelling
+@ @ float BETYPE_R4 float
+@ @ ushort BETYPE_U4 unsigned=short
+@ @ uint BETYPE_U4 unsigned=int
+@ @ ulong BETYPE_U4 unsigned=long
+@ @ immed_u2 U2 0..3
+@ @ immed_u4 U4 0..15
+@ @ immed_s5 I5 -16..15
+@ @ immed_u5 U5 0..31
+@ @ int BETYPE_I4 int
+@ @ long BETYPE_I4 long
+@ @ ptr PTR void=*
+@ @ v16 BETYPE_V16 vec_type
+@ @ void BETYPE_I4 void
+# fetype betype [code [spelling]]
+@ float_ptr ptr i float=*
+@ const_float_ptr ptr i float=*
+@ const_volatile_float_ptr ptr i float=*
+@ int int i
+@ int_ptr ptr i int=*
+@ long_ptr ptr i long=*
+@ const_int_ptr ptr i int=*
+@ const_long_ptr ptr i long=*
+@ const_volatile_int_ptr ptr i int=*
+@ const_volatile_long_ptr ptr i long=*
+@ immed_s5 immed_s5 A
+@ immed_u5 immed_u5 B
+@ immed_u4 immed_u4 C
+@ immed_u2 immed_u2 D
+@ cc24f int j=24=f
+@ cc24fd int j=24=f=d
+@ cc24fr int j=24=f=r
+@ cc24t int j=24=t
+@ cc24td int j=24=t=d
+@ cc24tr int j=24=t=r
+@ cc26f int j=26=f
+@ cc26fd int j=26=f=d
+@ cc26fr int j=26=f=r
+@ cc26t int j=26=t
+@ cc26td int j=26=t=d
+@ cc26tr int j=26=t=r
+@ short_ptr ptr i short=*
+@ signed_char_ptr ptr i signed=char=*
+@ unsigned_char_ptr ptr i unsigned=char=*
+@ unsigned_short_ptr ptr i unsigned=short=*
+@ unsigned_int_ptr ptr i unsigned=int=*
+@ unsigned_long_ptr ptr i unsigned=long=*
+@ const_short_ptr ptr i short=*
+@ const_signed_char_ptr ptr i signed=char=*
+@ const_unsigned_char_ptr ptr i unsigned=char=*
+@ const_unsigned_short_ptr ptr i unsigned=short=*
+@ const_unsigned_int_ptr ptr i unsigned=int=*
+@ const_unsigned_long_ptr ptr i unsigned=long=*
+@ const_volatile_short_ptr ptr i short=*
+@ const_volatile_signed_char_ptr ptr i signed=char=*
+@ const_volatile_unsigned_char_ptr ptr i unsigned=char=*
+@ const_volatile_unsigned_short_ptr ptr i unsigned=short=*
+@ const_volatile_unsigned_int_ptr ptr i unsigned=int=*
+@ const_volatile_unsigned_long_ptr ptr i unsigned=long=*
+@ vec_b16 v16 x vec_b16
+@ vec_b16_load_op v16 xl vec_b16
+@ vec_b16_ptr ptr i vec_b16=*
+@ const_vec_b16_ptr ptr i vec_b16=*
+@ vec_b32 v16 x vec_b32
+@ vec_b32_load_op v16 xl vec_b32
+@ vec_b32_ptr ptr i vec_b32=*
+@ const_vec_b32_ptr ptr i vec_b32=*
+@ vec_b8 v16 x vec_b8
+@ vec_b8_load_op v16 xl vec_b8
+@ vec_b8_ptr ptr i vec_b8=*
+@ const_vec_b8_ptr ptr i vec_b8=*
+@ vec_f32 v16 x vec_f32
+@ vec_f32_load_op v16 xl vec_f32
+@ vec_f32_ptr ptr i vec_f32=*
+@ const_vec_f32_ptr ptr i vec_f32=*
+@ vec_p16 v16 x vec_p16
+@ vec_p16_load_op v16 xl vec_p16
+@ vec_p16_ptr ptr i vec_p16=*
+@ const_vec_p16_ptr ptr i vec_p16=*
+@ vec_s16 v16 x vec_s16
+@ vec_s16_load_op v16 xl vec_s16
+@ vec_s16_ptr ptr i vec_s16=*
+@ const_vec_s16_ptr ptr i vec_s16=*
+@ vec_s32 v16 x vec_s32
+@ vec_s32_load_op v16 xl vec_s32
+@ vec_s32_ptr ptr i vec_s32=*
+@ const_vec_s32_ptr ptr i vec_s32=*
+@ vec_s8 v16 x vec_s8
+@ vec_s8_load_op v16 xl vec_s8
+@ vec_s8_ptr ptr i vec_s8=*
+@ const_vec_s8_ptr ptr i vec_s8=*
+@ vec_u16 v16 x vec_u16
+@ vec_u16_load_op v16 xl vec_u16
+@ vec_u16_ptr ptr i vec_u16=*
+@ const_vec_u16_ptr ptr i vec_u16=*
+@ vec_u32 v16 x vec_u32
+@ vec_u32_load_op v16 xl vec_u32
+@ vec_u32_ptr ptr i vec_u32=*
+@ const_vec_u32_ptr ptr i vec_u32=*
+@ vec_u8 v16 x vec_u8
+@ vec_u8_load_op v16 xl vec_u8
+@ vec_u8_ptr ptr i vec_u8=*
+@ const_vec_u8_ptr ptr i vec_u8=*
+@ void_store_op void s
+@ volatile_void void v
+@ volatile_void_load_op void vl
+@ volatile_void_store_op void vs
+@ volatile_vec_u16 v16 vx vec_u16
+@ char_ptr ptr i char=*
+@ const_char_ptr ptr i char=*
+# @ @ instruction type
+@ @ @ MOP_mfvscr fxu
+@ @ @ MOP_mtvscr fxu
+@ @ @ MOP_dss load
+@ @ @ MOP_dssall load
+@ @ @ MOP_dst load
+@ @ @ MOP_dstst load
+@ @ @ MOP_dststt load
+@ @ @ MOP_dstt load
+@ @ @ MOP_lvebx load
+@ @ @ MOP_lvehx load
+@ @ @ MOP_lvewx load
+@ @ @ MOP_lvsl load
+@ @ @ MOP_lvsr load
+@ @ @ MOP_lvx load
+@ @ @ MOP_lvxl load
+@ @ @ MOP_stvebx store
+@ @ @ MOP_stvehx store
+@ @ @ MOP_stvewx store
+@ @ @ MOP_stvx store
+@ @ @ MOP_stvxl store
+@ @ @ MOP_vaddcuw simple
+@ @ @ MOP_vaddfp fp
+@ @ @ MOP_vaddsbs simple
+@ @ @ MOP_vaddshs simple
+@ @ @ MOP_vaddsws simple
+@ @ @ MOP_vaddubm simple
+@ @ @ MOP_vaddubs simple
+@ @ @ MOP_vadduhm simple
+@ @ @ MOP_vadduhs simple
+@ @ @ MOP_vadduwm simple
+@ @ @ MOP_vadduws simple
+@ @ @ MOP_vand simple
+@ @ @ MOP_vandc simple
+@ @ @ MOP_vavgsb simple
+@ @ @ MOP_vavgsh simple
+@ @ @ MOP_vavgsw simple
+@ @ @ MOP_vavgub simple
+@ @ @ MOP_vavguh simple
+@ @ @ MOP_vavguw simple
+@ @ @ MOP_vcfsx fp
+@ @ @ MOP_vcfux fp
+@ @ @ MOP_vcmpbfp simple
+@ @ @ MOP_vcmpbfpD simple
+@ @ @ MOP_vcmpeqfp simple
+@ @ @ MOP_vcmpeqfpD simple
+@ @ @ MOP_vcmpequb simple
+@ @ @ MOP_vcmpequbD simple
+@ @ @ MOP_vcmpequh simple
+@ @ @ MOP_vcmpequhD simple
+@ @ @ MOP_vcmpequw simple
+@ @ @ MOP_vcmpequwD simple
+@ @ @ MOP_vcmpgefp simple
+@ @ @ MOP_vcmpgefpD simple
+@ @ @ MOP_vcmpgtfp simple
+@ @ @ MOP_vcmpgtfpD simple
+@ @ @ MOP_vcmpgtsb simple
+@ @ @ MOP_vcmpgtsbD simple
+@ @ @ MOP_vcmpgtsh simple
+@ @ @ MOP_vcmpgtshD simple
+@ @ @ MOP_vcmpgtsw simple
+@ @ @ MOP_vcmpgtswD simple
+@ @ @ MOP_vcmpgtub simple
+@ @ @ MOP_vcmpgtubD simple
+@ @ @ MOP_vcmpgtuh simple
+@ @ @ MOP_vcmpgtuhD simple
+@ @ @ MOP_vcmpgtuw simple
+@ @ @ MOP_vcmpgtuwD simple
+@ @ @ MOP_vctsxs fp
+@ @ @ MOP_vctuxs fp
+@ @ @ MOP_vexptefp fp
+@ @ @ MOP_vlogefp fp
+@ @ @ MOP_vmaddfp fp
+@ @ @ MOP_vmaxfp simple
+@ @ @ MOP_vmaxsb simple
+@ @ @ MOP_vmaxsh simple
+@ @ @ MOP_vmaxsw simple
+@ @ @ MOP_vmaxub simple
+@ @ @ MOP_vmaxuh simple
+@ @ @ MOP_vmaxuw simple
+@ @ @ MOP_vmhaddshs complex
+@ @ @ MOP_vmhraddshs complex
+@ @ @ MOP_vminfp simple
+@ @ @ MOP_vminsb simple
+@ @ @ MOP_vminsh simple
+@ @ @ MOP_vminsw simple
+@ @ @ MOP_vminub simple
+@ @ @ MOP_vminuh simple
+@ @ @ MOP_vminuw simple
+@ @ @ MOP_vmladduhm complex
+@ @ @ MOP_vmrghb perm
+@ @ @ MOP_vmrghh perm
+@ @ @ MOP_vmrghw perm
+@ @ @ MOP_vmrglb perm
+@ @ @ MOP_vmrglh perm
+@ @ @ MOP_vmrglw perm
+@ @ @ MOP_vmsummbm complex
+@ @ @ MOP_vmsumshm complex
+@ @ @ MOP_vmsumshs complex
+@ @ @ MOP_vmsumubm complex
+@ @ @ MOP_vmsumuhm complex
+@ @ @ MOP_vmsumuhs complex
+@ @ @ MOP_vmulesb complex
+@ @ @ MOP_vmulesh complex
+@ @ @ MOP_vmuleub complex
+@ @ @ MOP_vmuleuh complex
+@ @ @ MOP_vmulosb complex
+@ @ @ MOP_vmulosh complex
+@ @ @ MOP_vmuloub complex
+@ @ @ MOP_vmulouh complex
+@ @ @ MOP_vnmsubfp fp
+@ @ @ MOP_vnor simple
+@ @ @ MOP_vor simple
+@ @ @ MOP_vperm perm
+@ @ @ MOP_vpkpx perm
+@ @ @ MOP_vpkshss perm
+@ @ @ MOP_vpkshus perm
+@ @ @ MOP_vpkswss perm
+@ @ @ MOP_vpkswus perm
+@ @ @ MOP_vpkuhum perm
+@ @ @ MOP_vpkuhus perm
+@ @ @ MOP_vpkuwum perm
+@ @ @ MOP_vpkuwus perm
+@ @ @ MOP_vrefp fp
+@ @ @ MOP_vrfim fp
+@ @ @ MOP_vrfin fp
+@ @ @ MOP_vrfip fp
+@ @ @ MOP_vrfiz fp
+@ @ @ MOP_vrlb simple
+@ @ @ MOP_vrlh simple
+@ @ @ MOP_vrlw simple
+@ @ @ MOP_vrsqrtefp fp
+@ @ @ MOP_vsel simple
+@ @ @ MOP_vsl simple
+@ @ @ MOP_vslb simple
+@ @ @ MOP_vsldoi perm
+@ @ @ MOP_vslh simple
+@ @ @ MOP_vslo perm_bug
+@ @ @ MOP_vslw simple
+@ @ @ MOP_vspltb perm
+@ @ @ MOP_vsplth perm
+@ @ @ MOP_vspltisb perm
+@ @ @ MOP_vspltish perm
+@ @ @ MOP_vspltisw perm
+@ @ @ MOP_vspltw perm
+@ @ @ MOP_vsr simple
+@ @ @ MOP_vsrab simple
+@ @ @ MOP_vsrah simple
+@ @ @ MOP_vsraw simple
+@ @ @ MOP_vsrb simple
+@ @ @ MOP_vsrh simple
+@ @ @ MOP_vsro perm_bug
+@ @ @ MOP_vsrw simple
+@ @ @ MOP_vsubcuw simple
+@ @ @ MOP_vsubfp fp
+@ @ @ MOP_vsubsbs simple
+@ @ @ MOP_vsubshs simple
+@ @ @ MOP_vsubsws simple
+@ @ @ MOP_vsububm simple
+@ @ @ MOP_vsububs simple
+@ @ @ MOP_vsubuhm simple
+@ @ @ MOP_vsubuhs simple
+@ @ @ MOP_vsubuwm simple
+@ @ @ MOP_vsubuws simple
+@ @ @ MOP_vsum2sws complex
+@ @ @ MOP_vsum4sbs complex
+@ @ @ MOP_vsum4shs complex
+@ @ @ MOP_vsum4ubs complex
+@ @ @ MOP_vsumsws complex
+@ @ @ MOP_vupkhpx perm
+@ @ @ MOP_vupkhsb perm
+@ @ @ MOP_vupkhsh perm
+@ @ @ MOP_vupklpx perm
+@ @ @ MOP_vupklsb perm
+@ @ @ MOP_vupklsh perm
+@ @ @ MOP_vxor simple
+# The vec_abs and vec_abss operations identify their variants with insn_name.
+# Map these into a valid insn code (xfx_perm).
+@ @ @ 1 perm
+@ @ @ 2 perm
+@ @ @ 3 perm
+@ @ @ 4 perm
+@ @ @ 5 perm
+@ @ @ 6 perm
+@ @ @ 7 perm
diff --git a/gcc/config/rs6000/darwin-fpsave.asm b/gcc/config/rs6000/darwin-fpsave.asm
new file mode 100644
index 00000000000..d25a1141c45
--- /dev/null
+++ b/gcc/config/rs6000/darwin-fpsave.asm
@@ -0,0 +1,69 @@
+/* APPLE LOCAL file performance improvement */
+/* This file contains the floating-point save and restore routines.
+
+ THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE
+ ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31,
+ for example. For FP reg saves/restores, it takes one instruction
+ (4 bytes) to do the operation; for Vector regs, 2 instructions are
+ required (8 bytes.)
+
+ MORAL: DO NOT MESS AROUND WITH THESE FUNCTIONS! */
+
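+/* Informal sketch of the entry-point arithmetic (implied by the code
+   below): each save or restore is a single 4-byte instruction, so the
+   entry point that handles fN..f31 is saveFP + 4*(N-14).  For example,
+   saveFP+60 saves f29..f31, and saveFP+0 saves all of f14..f31; every
+   entry point also stores the caller's LR (passed in R0) at 8(R1).  */
+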
+.text
+ .align 2
+
+/* saveFP saves R0 -- assumed to be the caller's LR -- to 8(R1). */
+
+.private_extern saveFP
+saveFP:
+ stfd f14,-144(r1)
+ stfd f15,-136(r1)
+ stfd f16,-128(r1)
+ stfd f17,-120(r1)
+ stfd f18,-112(r1)
+ stfd f19,-104(r1)
+ stfd f20,-96(r1)
+ stfd f21,-88(r1)
+ stfd f22,-80(r1)
+ stfd f23,-72(r1)
+ stfd f24,-64(r1)
+ stfd f25,-56(r1)
+ stfd f26,-48(r1)
+ stfd f27,-40(r1)
+ stfd f28,-32(r1)
+ stfd f29,-24(r1)
+ stfd f30,-16(r1)
+ stfd f31,-8(r1)
+ stw r0,8(r1)
+ blr
+
+/* restFP restores the caller's LR from 8(R1). Note that the code for
+ this starts at the offset of F30 restoration, so calling this
+ routine in an attempt to restore only F31 WILL NOT WORK (it would
+ be a stupid thing to do, anyway.) */
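+
+/* Entry-point arithmetic, implied by the code below: the restore for fN
+   begins at restFP + 4*(N-14), so restFP+60 restores f29..f31.  Because
+   the LR reload sits in the slot where the f30 restore would otherwise
+   start, every supported entry point (f30 or earlier) also restores the
+   caller's LR from 8(R1); there is no entry point for f31 alone.  */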
+
+.private_extern restFP
+restFP:
+ lfd f14,-144(r1)
+ lfd f15,-136(r1)
+ lfd f16,-128(r1)
+ lfd f17,-120(r1)
+ lfd f18,-112(r1)
+ lfd f19,-104(r1)
+ lfd f20,-96(r1)
+ lfd f21,-88(r1)
+ lfd f22,-80(r1)
+ lfd f23,-72(r1)
+ lfd f24,-64(r1)
+ lfd f25,-56(r1)
+ lfd f26,-48(r1)
+ lfd f27,-40(r1)
+ lfd f28,-32(r1)
+ lfd f29,-24(r1)
+ /* <OFFSET OF F30 RESTORE> restore caller's LR */
+ lwz r0,8(r1)
+ lfd f30,-16(r1)
+ /* and prepare for return to caller */
+ mtlr r0
+ lfd f31,-8(r1)
+ blr
diff --git a/gcc/config/rs6000/darwin-vecsave.asm b/gcc/config/rs6000/darwin-vecsave.asm
new file mode 100644
index 00000000000..8c5352ee709
--- /dev/null
+++ b/gcc/config/rs6000/darwin-vecsave.asm
@@ -0,0 +1,133 @@
+/* APPLE LOCAL file AltiVec */
+/* Vector save/restore routines for Darwin. Note that each vector
+ save/restore requires 2 instructions (8 bytes.)
+
+ THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE
+ ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31,
+ for example. For FP reg saves/restores, it takes one instruction
+ (4 bytes) to do the operation; for Vector regs, 2 instructions are
+ required (8 bytes.)
+
+ MORAL: DO NOT MESS AROUND WITH THESE FUNCTIONS! */
+
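+/* Informal sketch, implied by the code below: each vector save or restore
+   is an li/stvx or li/lvx pair (8 bytes), so the entry point that handles
+   vN..v31 is saveVEC + 8*(N-20), and likewise for restVEC.  The offset is
+   loaded into R11, and the save-area base address is evidently expected
+   to be passed in R0 by the caller (the stvx/lvx here address R11+R0).  */
+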
+.text
+ .align 2
+
+.private_extern saveVEC
+saveVEC:
+ li r11,-192
+ stvx v20,r11,r0
+ li r11,-176
+ stvx v21,r11,r0
+ li r11,-160
+ stvx v22,r11,r0
+ li r11,-144
+ stvx v23,r11,r0
+ li r11,-128
+ stvx v24,r11,r0
+ li r11,-112
+ stvx v25,r11,r0
+ li r11,-96
+ stvx v26,r11,r0
+ li r11,-80
+ stvx v27,r11,r0
+ li r11,-64
+ stvx v28,r11,r0
+ li r11,-48
+ stvx v29,r11,r0
+ li r11,-32
+ stvx v30,r11,r0
+ li r11,-16
+ stvx v31,r11,r0
+ blr
+
+.private_extern restVEC
+restVEC:
+ li r11,-192
+ lvx v20,r11,r0
+ li r11,-176
+ lvx v21,r11,r0
+ li r11,-160
+ lvx v22,r11,r0
+ li r11,-144
+ lvx v23,r11,r0
+ li r11,-128
+ lvx v24,r11,r0
+ li r11,-112
+ lvx v25,r11,r0
+ li r11,-96
+ lvx v26,r11,r0
+ li r11,-80
+ lvx v27,r11,r0
+ li r11,-64
+ lvx v28,r11,r0
+ li r11,-48
+ lvx v29,r11,r0
+ li r11,-32
+ lvx v30,r11,r0
+ li r11,-16
+ lvx v31,r11,r0
+ blr
+
+/* saveVEC_vr11 -- as saveVEC but VRsave is returned in R11. */
+
+.private_extern saveVEC_vr11
+saveVEC_vr11:
+ li r11,-192
+ stvx v20,r11,r0
+ li r11,-176
+ stvx v21,r11,r0
+ li r11,-160
+ stvx v22,r11,r0
+ li r11,-144
+ stvx v23,r11,r0
+ li r11,-128
+ stvx v24,r11,r0
+ li r11,-112
+ stvx v25,r11,r0
+ li r11,-96
+ stvx v26,r11,r0
+ li r11,-80
+ stvx v27,r11,r0
+ li r11,-64
+ stvx v28,r11,r0
+ li r11,-48
+ stvx v29,r11,r0
+ li r11,-32
+ stvx v30,r11,r0
+ li r11,-16
+ stvx v31,r11,r0
+ mfspr r11,VRsave
+ blr
+
+/* As restVEC, but the original VRsave value is passed in R10. */
+
+.private_extern restVEC_vr10
+restVEC_vr10:
+ li r11,-192
+ lvx v20,r11,r0
+ li r11,-176
+ lvx v21,r11,r0
+ li r11,-160
+ lvx v22,r11,r0
+ li r11,-144
+ lvx v23,r11,r0
+ li r11,-128
+ lvx v24,r11,r0
+ li r11,-112
+ lvx v25,r11,r0
+ li r11,-96
+ lvx v26,r11,r0
+ li r11,-80
+ lvx v27,r11,r0
+ li r11,-64
+ lvx v28,r11,r0
+ li r11,-48
+ lvx v29,r11,r0
+ li r11,-32
+ lvx v30,r11,r0
+ li r11,-16
+ lvx v31,r11,r0
+ /* restore VRsave from R10. */
+ mtspr VRsave,r10
+ blr
diff --git a/gcc/config/rs6000/darwin-worldsave.asm b/gcc/config/rs6000/darwin-worldsave.asm
new file mode 100644
index 00000000000..2d43f61adde
--- /dev/null
+++ b/gcc/config/rs6000/darwin-worldsave.asm
@@ -0,0 +1,233 @@
+/* APPLE LOCAL file world save/restore */
+/* This file contains the exception-handling save_world and
+ restore_world routines, which need to do a run-time check to see if
+ they should save and restore the vector regs. */
+
+.data
+ .align 2
+
+#ifdef __DYNAMIC__
+
+.non_lazy_symbol_pointer
+L_has_vec$non_lazy_ptr:
+ .indirect_symbol __cpu_has_altivec
+ .long 0
+
+#else
+
+/* For static, "pretend" we have a non-lazy-pointer. */
+
+L_has_vec$non_lazy_ptr:
+ .long __cpu_has_altivec
+
+#endif
+
+
+.text
+ .align 2
+
+/* save_world and rest_world save/restore F14-F31 and possibly V20-V31
+ (assuming you have a CPU with vector registers; we use a global var
+ provided by the System Framework to determine this.)
+
+ SAVE_WORLD takes R0 (the caller's caller's return address) and R11
+ (the stack frame size) as parameters. It returns VRsave in R0 if
+ we're on a CPU with vector regs.
+
+ With gcc3, we now need to save and restore CR as well, since gcc3's
+ scheduled prologs can cause comparisons to be moved before calls to
+ save_world!
+
+ USES: R0 R11 R12 */
+
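+/* Summary of the frame layout implied by the code below (offsets are
+   relative to the caller's R1, before the new frame is allocated):
+     8(R1)              caller's caller's return address (from R0)
+     4(R1)              CR
+     -144..-8(R1)       F14..F31
+     -220..-148(R1)     R13..R31 (via stmw)
+     -224(R1)           VRsave    (only when the CPU has AltiVec)
+     -416..-240(R1)     V20..V31  (only when the CPU has AltiVec)
+   R12 is set to R1-224 before the frame is allocated with stwux.  */
+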
+.private_extern save_world
+save_world:
+ stw r0,8(r1)
+ mflr r0
+ bcl 20,31,Ls$pb
+Ls$pb: mflr r12
+ addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Ls$pb)
+ lwz r12,lo16(L_has_vec$non_lazy_ptr-Ls$pb)(r12)
+ mtlr r0
+ lwz r12,0(r12)
+ /* grab CR */
+ mfcr r0
+ /* test HAS_VEC */
+ cmpwi r12,0
+ stfd f14,-144(r1)
+ stfd f15,-136(r1)
+ stfd f16,-128(r1)
+ stfd f17,-120(r1)
+ stfd f18,-112(r1)
+ stfd f19,-104(r1)
+ stfd f20,-96(r1)
+ stfd f21,-88(r1)
+ stfd f22,-80(r1)
+ stfd f23,-72(r1)
+ stfd f24,-64(r1)
+ stfd f25,-56(r1)
+ stfd f26,-48(r1)
+ stfd f27,-40(r1)
+ stfd f28,-32(r1)
+ stfd f29,-24(r1)
+ stfd f30,-16(r1)
+ stfd f31,-8(r1)
+ stmw r13,-220(r1)
+ /* stash CR */
+ stw r0,4(r1)
+ /* set R12 pointing at Vector Reg save area */
+ addi r12,r1,-224
+ /* allocate stack frame */
+ stwux r1,r1,r11
+ /* ...but return if HAS_VEC is zero */
+ bne+ L$saveVMX
+ /* Not forgetting to restore CR. */
+ mtcr r0
+ blr
+
+L$saveVMX:
+ /* We're saving Vector regs too. */
+ /* Restore CR from R0. No More Branches! */
+ mtcr r0
+
+ /* We should really use VRSAVE to figure out which vector regs
+ we actually need to save and restore. Some other time :-/ */
+
+ li r11,-192
+ stvx v20,r11,r12
+ li r11,-176
+ stvx v21,r11,r12
+ li r11,-160
+ stvx v22,r11,r12
+ li r11,-144
+ stvx v23,r11,r12
+ li r11,-128
+ stvx v24,r11,r12
+ li r11,-112
+ stvx v25,r11,r12
+ li r11,-96
+ stvx v26,r11,r12
+ li r11,-80
+ stvx v27,r11,r12
+ li r11,-64
+ stvx v28,r11,r12
+ li r11,-48
+ stvx v29,r11,r12
+ li r11,-32
+ stvx v30,r11,r12
+ mfspr r0,VRsave
+ li r11,-16
+ stvx v31,r11,r12
+ /* VRsave lives at -224(R1) */
+ stw r0,0(r12)
+ blr
+
+
+/* eh_rest_world_r10 is jumped to, not called, so no need to worry about LR.
+ R10 is the C++ EH stack adjust parameter; we return to the caller's caller.
+
+ USES: R0 R10 R11 R12 and R7 R8
+ RETURNS: C++ EH Data registers (R3 - R6.)
+
+ We now set up R7/R8 and jump to rest_world_eh_r7r8.
+
+ rest_world doesn't use the R10 stack adjust parameter, nor does it
+ pick up the R3-R6 exception handling stuff. */
+
+.private_extern rest_world
+rest_world:
+ /* Pickup previous SP */
+ lwz r11, 0(r1)
+ li r7, 0
+ lwz r8, 8(r11)
+ li r10, 0
+ b rest_world_eh_r7r8
+
+.private_extern eh_rest_world_r10
+eh_rest_world_r10:
+ /* Pickup previous SP */
+ lwz r11, 0(r1)
+ mr r7,r10
+ lwz r8, 8(r11)
+ /* pickup the C++ EH data regs (R3 - R6.) */
+ lwz r6,-420(r11)
+ lwz r5,-424(r11)
+ lwz r4,-428(r11)
+ lwz r3,-432(r11)
+
+ b rest_world_eh_r7r8
+
+/* rest_world_eh_r7r8 is jumped to -- not called! -- when we're doing
+ the exception-handling epilog. R7 contains the offset to add to
+ the SP, and R8 contains the 'real' return address.
+
+ USES: R0 R11 R12 [R7/R8]
+ RETURNS: C++ EH Data registers (R3 - R6.) */
+
+rest_world_eh_r7r8:
+ bcl 20,31,Lr7r8$pb
+Lr7r8$pb: mflr r12
+ lwz r11,0(r1)
+ /* R11 := previous SP */
+ addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Lr7r8$pb)
+ lwz r12,lo16(L_has_vec$non_lazy_ptr-Lr7r8$pb)(r12)
+ lwz r0,4(r11)
+ /* R0 := old CR */
+ lwz r12,0(r12)
+ /* R12 := HAS_VEC */
+ mtcr r0
+ cmpwi r12,0
+ lmw r13,-220(r11)
+ beq L.rest_world_fp_eh
+ /* restore VRsave and V20..V31 */
+ lwz r0,-224(r11)
+ li r12,-416
+ mtspr VRsave,r0
+ lvx v20,r11,r12
+ li r12,-400
+ lvx v21,r11,r12
+ li r12,-384
+ lvx v22,r11,r12
+ li r12,-368
+ lvx v23,r11,r12
+ li r12,-352
+ lvx v24,r11,r12
+ li r12,-336
+ lvx v25,r11,r12
+ li r12,-320
+ lvx v26,r11,r12
+ li r12,-304
+ lvx v27,r11,r12
+ li r12,-288
+ lvx v28,r11,r12
+ li r12,-272
+ lvx v29,r11,r12
+ li r12,-256
+ lvx v30,r11,r12
+ li r12,-240
+ lvx v31,r11,r12
+
+L.rest_world_fp_eh:
+ lfd f14,-144(r11)
+ lfd f15,-136(r11)
+ lfd f16,-128(r11)
+ lfd f17,-120(r11)
+ lfd f18,-112(r11)
+ lfd f19,-104(r11)
+ lfd f20,-96(r11)
+ lfd f21,-88(r11)
+ lfd f22,-80(r11)
+ lfd f23,-72(r11)
+ lfd f24,-64(r11)
+ lfd f25,-56(r11)
+ lfd f26,-48(r11)
+ lfd f27,-40(r11)
+ lfd f28,-32(r11)
+ lfd f29,-24(r11)
+ lfd f30,-16(r11)
+ /* R8 is the exception-handler's address */
+ mtctr r8
+ lfd f31,-8(r11)
+ /* set SP to original value + R7 offset */
+ add r1,r11,r7
+ bctr
diff --git a/gcc/config/rs6000/darwin.h b/gcc/config/rs6000/darwin.h
index 6f193f739c3..d35685ee5de 100644
--- a/gcc/config/rs6000/darwin.h
+++ b/gcc/config/rs6000/darwin.h
@@ -96,19 +96,33 @@ do { \
#define CC1_SPEC "\
%{gused: -feliminate-unused-debug-symbols %<gused }\
%{static: %{Zdynamic: %e conflicting code gen style switches are used}}\
-%{!static:%{!mdynamic-no-pic:-fPIC}}"
+%{!static:%{!fast:%{!fastf:%{!fastcp:%{!mdynamic-no-pic:-fPIC}}}}}"
/* It's virtually impossible to predict all the possible combinations
of -mcpu and -maltivec and whatnot, so just supply
-force_cpusubtype_ALL if any are seen. Radar 3492132 against the
assembler is asking for a .machine directive so we could get this
really right. */
-#define ASM_SPEC "-arch ppc \
+#define ASM_SPEC " %(darwin_arch_spec)\
%{Zforce_cpusubtype_ALL:-force_cpusubtype_ALL} \
- %{!Zforce_cpusubtype_ALL:%{maltivec|mcpu=*|mpowerpc64:-force_cpusubtype_ALL}}"
+ %{!Zforce_cpusubtype_ALL:%{maltivec|faltivec:-force_cpusubtype_ALL}}"
+
+/* APPLE LOCAL begin 3492132 */
+#define DARWIN_ARCH_SPEC \
+"%{mcpu=601: %{!Zdynamiclib:-arch ppc601} %{Zdynamiclib:-arch_only ppc601}} \
+ %{mcpu=603: %{!Zdynamiclib:-arch ppc603} %{Zdynamiclib:-arch_only ppc603}} \
+ %{mcpu=604: %{!Zdynamiclib:-arch ppc604} %{Zdynamiclib:-arch_only ppc604}} \
+ %{mcpu=604e: %{!Zdynamiclib:-arch ppc604e} %{Zdynamiclib:-arch_only ppc604e}} \
+ %{mcpu=750: %{!Zdynamiclib:-arch ppc750} %{Zdynamiclib:-arch_only ppc750}} \
+ %{mcpu=7400: %{!Zdynamiclib:-arch ppc7400} %{Zdynamiclib:-arch_only ppc7400}} \
+ %{mcpu=7450: %{!Zdynamiclib:-arch ppc7450} %{Zdynamiclib:-arch_only ppc7450}} \
+ %{mcpu=970: %{!Zdynamiclib:-arch ppc970} %{Zdynamiclib:-arch_only ppc970}} \
+ %{!mcpu*:%{!march*:%{!Zdynamiclib:-arch ppc} %{Zdynamiclib:-arch_only ppc}}} "
+/* APPLE LOCAL end 3492132 */
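+
+/* For example, with -mcpu=7450 this spec expands to "-arch ppc7450", or to
+   "-arch_only ppc7450" when a dynamiclib is being built; with no -mcpu or
+   -march it falls back to plain "-arch ppc".  */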
#undef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS \
+ { "darwin_arch_spec", DARWIN_ARCH_SPEC }, \
{ "darwin_arch", "ppc" },
/* The "-faltivec" option should have been called "-maltivec" all along. */
@@ -130,6 +144,24 @@ do { \
#undef RS6000_PIC_OFFSET_TABLE_REGNUM
#define RS6000_PIC_OFFSET_TABLE_REGNUM 31
+/* APPLE LOCAL begin -pg fix */
+/* -pg has a problem which is normally concealed by -fPIC;
+ either -mdynamic-no-pic or -static exposes the -pg problem, causing the
+ crash. FSF gcc for Darwin also has this bug. The problem is that -pg
+ causes several int registers to be saved and restored although they may
+ not actually be used (config/rs6000/rs6000.c:first_reg_to_save()). In the
+ rare case where none of them is actually used, a consistency check fails
+ (correctly). This cannot happen with -fPIC because the PIC register (R31)
+ is always "used" in the sense checked by the consistency check. The
+ easy fix, here, is therefore to mark R31 always "used" whenever -pg is on.
+ A better, but harder, fix would be to improve -pg's register-use
+ logic along the lines suggested by comments in the function listed above. */
+#undef PIC_OFFSET_TABLE_REGNUM
+#define PIC_OFFSET_TABLE_REGNUM ((flag_pic || profile_flag) \
+ ? RS6000_PIC_OFFSET_TABLE_REGNUM \
+ : INVALID_REGNUM)
+/* APPLE LOCAL end -pg fix */
+
/* Pad the outgoing args area to 16 bytes instead of the usual 8. */
#undef STARTING_FRAME_OFFSET
@@ -145,14 +177,27 @@ do { \
/* These are used by -fbranch-probabilities */
#define HOT_TEXT_SECTION_NAME "__TEXT,__text,regular,pure_instructions"
+/* APPLE LOCAL begin hot/cold partitioning */
#define UNLIKELY_EXECUTED_TEXT_SECTION_NAME \
- "__TEXT,__text2,regular,pure_instructions"
+ "__TEXT,__unlikely,regular,pure_instructions"
+/* APPLE LOCAL end hot/cold partitioning */
+/* APPLE LOCAL begin long branch */
/* Define cutoff for using external functions to save floating point.
- Currently on Darwin, always use inline stores. */
+ For Darwin, use the function for more than a few registers. */
+
+#undef FP_SAVE_INLINE
+#define FP_SAVE_INLINE(FIRST_REG) \
+ (((FIRST_REG) > 60 && (FIRST_REG) < 64) \
+ || TARGET_LONG_BRANCH)
+
+/* Define cutoff for using external functions to save vector registers. */
-#undef FP_SAVE_INLINE
-#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 64)
+#undef VECTOR_SAVE_INLINE
+#define VECTOR_SAVE_INLINE(FIRST_REG) \
+ (((FIRST_REG) >= LAST_ALTIVEC_REGNO - 1 && (FIRST_REG) <= LAST_ALTIVEC_REGNO) \
+ || TARGET_LONG_BRANCH)
+/* APPLE LOCAL end long branch */
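+
+/* Note on the cutoffs above: FIRST_REG is a GCC hard register number
+   (FPRs are 32..63, so f29..f31 are 61..63).  With these definitions,
+   saves of more than three FP registers, or more than two vector
+   registers, presumably go through the out-of-line routines (the
+   saveFP/restFP and saveVEC/restVEC functions added elsewhere in this
+   patch) unless TARGET_LONG_BRANCH forces the inline sequences.  */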
/* The assembler wants the alternate register names, but without
leading percent sign. */
@@ -212,11 +257,7 @@ do { \
#undef ASM_COMMENT_START
#define ASM_COMMENT_START ";"
-/* FP save and restore routines. */
-#define SAVE_FP_PREFIX "._savef"
-#define SAVE_FP_SUFFIX ""
-#define RESTORE_FP_PREFIX "._restf"
-#define RESTORE_FP_SUFFIX ""
+/* APPLE LOCAL don't define SAVE_FP_PREFIX and friends */
/* This is how to output an assembler line that says to advance
the location counter to a multiple of 2**LOG bytes using the
@@ -288,38 +329,63 @@ do { \
? GENERAL_REGS \
: (CLASS))
-/* Fix for emit_group_load (): force large constants to be pushed via regs. */
-#define ALWAYS_PUSH_CONSTS_USING_REGS_P 1
-
-/* This now supports a natural alignment mode */
-/* Darwin word-aligns FP doubles but doubleword-aligns 64-bit ints. */
-#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
- (TARGET_ALIGN_NATURAL ? (COMPUTED) : \
- (TYPE_MODE (TREE_CODE (TREE_TYPE (FIELD)) == ARRAY_TYPE \
- ? get_inner_array_type (FIELD) \
- : TREE_TYPE (FIELD)) == DFmode \
- ? MIN ((COMPUTED), 32) : (COMPUTED)))
-
-/* Darwin increases natural record alignment to doubleword if the first
- field is an FP double while the FP fields remain word aligned. */
-#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
- ((TREE_CODE (STRUCT) == RECORD_TYPE \
- || TREE_CODE (STRUCT) == UNION_TYPE \
- || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
- && TARGET_ALIGN_NATURAL == 0 \
- ? rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \
- : (TARGET_ALTIVEC && TREE_CODE (STRUCT) == VECTOR_TYPE) \
- ? MAX (MAX ((COMPUTED), (SPECIFIED)), 128) \
- : MAX ((COMPUTED), (SPECIFIED)))
+/* APPLE LOCAL begin Macintosh alignment 2002-2-26 ff */
+/* This now supports the Macintosh power, mac68k, and natural
+ alignment modes. It now has one more parameter than the standard
+ version of the ADJUST_FIELD_ALIGN macro.
+
+ The macro works as follows: We use the computed alignment of the
+ field if we are in the natural alignment mode or if the field is
+ a vector. Otherwise, if we are in the mac68k alignment mode, we
+ use the minimum of the computed alignment and 16 (pegging at
+ 2-byte alignment). If we are in the power mode, we peg at 32
+ (word alignment) unless it is the first field of the struct, in
+ which case we use the computed alignment. */
+#undef ADJUST_FIELD_ALIGN
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED, FIRST_FIELD_P) \
+ (TARGET_ALIGN_NATURAL ? (COMPUTED) : \
+ (((COMPUTED) == RS6000_VECTOR_ALIGNMENT) \
+ ? RS6000_VECTOR_ALIGNMENT \
+ : (MIN ((COMPUTED), \
+ (TARGET_ALIGN_MAC68K ? 16 \
+ : ((FIRST_FIELD_P) ? (COMPUTED) \
+ : 32))))))
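+/* For example, a non-vector "double" field (COMPUTED == 64) keeps 64-bit
+   alignment under natural alignment, gets MIN (64, 32) == 32 under power
+   alignment unless it is the first field, and MIN (64, 16) == 16 under
+   mac68k alignment; a field whose computed alignment equals
+   RS6000_VECTOR_ALIGNMENT keeps that alignment in every mode.  */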
+
+#undef ROUND_TYPE_ALIGN
+/* Macintosh alignment modes require more complicated handling
+ of alignment, so we replace the macro with a call to an
+ out-of-line function. */
+union tree_node;
+extern unsigned round_type_align (union tree_node*, unsigned, unsigned); /* rs6000.c */
+#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
+ round_type_align(STRUCT, COMPUTED, SPECIFIED)
+/* APPLE LOCAL end Macintosh alignment 2002-2-26 ff */
+
+/* APPLE LOCAL begin alignment */
+/* Make sure local alignments come from the type node, not the mode;
+ mode-based alignments are wrong for vectors. */
+#undef LOCAL_ALIGNMENT
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) (MAX ((unsigned) ALIGN, \
+ TYPE_ALIGN (TYPE)))
+/* APPLE LOCAL end alignment */
/* XXX: Darwin supports neither .quad, or .llong, but it also doesn't
support 64 bit PowerPC either, so this just keeps things happy. */
#define DOUBLE_INT_ASM_OP "\t.quad\t"
+/* APPLE LOCAL begin branch cost */
+#undef BRANCH_COST
+/* Better code is generated by saying conditional branches take 1 tick. */
+#define BRANCH_COST 1
+/* APPLE LOCAL end branch cost */
+
+/* APPLE LOCAL indirect calls in R12 */
+/* Address of indirect call must be computed here */
+#define MAGIC_INDIRECT_CALL_REG 12
+
/* For binary compatibility with 2.95; Darwin C APIs use bool from
stdbool.h, which was an int-sized enum in 2.95. */
#define BOOL_TYPE_SIZE INT_TYPE_SIZE
-#undef REGISTER_TARGET_PRAGMAS
-#define REGISTER_TARGET_PRAGMAS DARWIN_REGISTER_TARGET_PRAGMAS
-
+/* APPLE LOCAL OS pragma hook */
+/* Register generic Darwin pragmas as "OS" pragmas. */
diff --git a/gcc/config/rs6000/ops-to-gp b/gcc/config/rs6000/ops-to-gp
new file mode 100755
index 00000000000..becb406749b
--- /dev/null
+++ b/gcc/config/rs6000/ops-to-gp
@@ -0,0 +1,620 @@
+#!/bin/sh
+# APPLE LOCAL file AltiVec
+# ops-to-gp -gcc vec.ops builtin.ops
+# Creates vec.h used by rs6000.c
+
+arg0=`basename $0`
+err() {
+ echo "$arg0: $*" 1>&2
+ exit 2
+}
+
+if [ $# -eq 0 ] ; then
+ echo "Usage: $arg0 [ -mcc | -gcc ] builtin-ops ..." 1>&2
+ exit 1
+fi
+
+MCC=1
+GCC=0
+suffix="gp"
+if [ "$1" = "-mcc" ] ; then
+ shift;
+elif [ "$1" = "-gcc" ] ; then
+ GCC=1
+ MCC=0
+ suffix="h"
+ shift;
+fi
+
+output=`basename $1 .ops`
+gperf="gperf -G -a -o -k1-15 -p -t -D -T -N Is_Builtin_Function $output.gp";
+
+# Lines in the ops file have the form
+# @ @ betype betype-code type-spelling
+# @ fetype betype [code]
+# @ @ @ instruction type
+# generic op1 op2 ... opn = result specific when configured [addressible
+# [instruction [const_ptr_ok [volatile_ptr_ok [transform [predicate]]]]]]
+
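+# Pipeline overview (a summary of the stages that follow):
+#   1. sort -u   put the ops files in canonical order, dropping duplicates
+#   2. awk       assign uids, derive generic entries from specific ones,
+#                validate types, and fill in default transform, predicate
+#                and insn-code fields
+#   3. sed       strip the _load_op/_store_op qualifiers
+#   4. sort -u   re-sort and drop duplicates again
+#   5. awk       append the overload count of each generic function
+#   6. awk       emit $output.gp (-mcc, later fed to gperf) or the C
+#                tables in $output.h (-gcc) used by rs6000.c
+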
+# Sort the ops file to put it in a canonical order.
+sort -u $* | \
+
+# Add specific function uid's, make generic functions from specific
+# functions, validate the types used, compute default parameters, and
+# compute parts of the default transform and predicate functions.
+awk 'BEGIN {
+ i = 0
+ EQ = i++
+ RESULT = i++
+ SPECIFIC = i++
+ WHEN = i++
+ CONFIGURED = i++
+ ADDRESSIBLE = i++
+ INSTRUCTION = i++
+ CONST_PTR_OK = i++
+ VOLATILE_PTR_OK = i++
+ TRANSFORM = i++
+ PREDICATE = i++
+ n_lines = 1;
+ tree[3] = "Make_Folded_4tree";
+ tree[2] = "Make_Folded_3tree";
+ tree[1] = "Make_Folded_Btree";
+ tree[0] = "Make_Utree";
+ optimize["vec_sub"] = 1;
+ optimize["vec_subs"] = 1;
+ optimize["vec_xor"] = 1;
+ optimize["vec_andc"] = 1;
+ optimize["vec_avg"] = 2;
+ optimize["vec_or"] = 2;
+ optimize["vec_and"] = 2;
+ optimize["vec_max"] = 2;
+ optimize["vec_min"] = 2;
+ optimize["vec_sld"] = 3;
+ optimize["vec_splat_s8"] = 4;
+ optimize["vec_splat_s16"] = 5;
+ optimize["vec_splat_s32"] = 6;
+ optimize["vec_splat_u8"] = 4;
+ optimize["vec_splat_u16"] = 5;
+ optimize["vec_splat_u32"] = 6;
+ optimize["vec_cmpeq"] = 7;
+ optimize["vec_lvsl"] = 8;
+ optimize["vec_lvsr"] = 9;
+ # These operations need additional transformation. Key off the
+ # optimize attribute to identify them.
+ optimize["vec_cmplt"] = 10;
+ optimize["vec_cmple"] = 10;
+ optimize["vec_abs"] = 11;
+ optimize["vec_abss"] = 11;
+ }
+ function no_type(t) {
+ printf "%% Error: type %s not declared.\n", t;
+ status = 1;
+ exit;
+ }
+ # Record the type.
+ $1 == "@" {
+ if ($2 == "@") {
+ if ($3 == "@") {
+ # Definition of an instruction.
+ insn_type[$4] = $5; # type
+ } else {
+ # Definition of a betype.
+ becode[$3] = $4; # betype-code
+ bespell[$3] = $5; # type-spelling
+ gsub(/\=/, " ", bespell[$3]);
+ }
+ } else {
+ # Definition of a fetype.
+ print $0;
+ if (!becode[$3]) no_type($3); # Must have defined the betype.
+ betype[$2] = $3; # betype;
+ if (NF == 3)
+ code[$2] = "";
+ else
+ code[$2] = $4; # code
+ }
+ }
+ function no_equal(i,l) {
+ printf "%% Syntax error %d: %s\n", i, l;
+ status = 1;
+ exit;
+ }
+ function error(f,a) {
+ printf( ("%% error: " f), a);
+ status = 1;
+ exit;
+ }
+ # Ignore comment lines.
+ $1 != "#" && $1 != "@" {
+ # Generate the signature of the specific function, the predicate,
+ # the transform, the arguments to the transform function, the
+ # arguments to the predicate function, and the spelling of the
+ # function type.
+ signature = "";
+ predicate = "";
+ transform = "";
+ insn_code = "";
+ transform_args = "";
+ predicate_args = "";
+ function_type = "";
+ # First, consider the parameter types.
+ for (i = 2; $i != "=" && i < NF; i++) {
+ if ($i != "...") {
+ if (!betype[$i]) no_type($i);
+ signature = (signature " " $i);
+ predicate = (predicate "_" betype[$i]);
+ transform = (transform code[$i]);
+ transform_args = (transform_args ", ND_kid(t," i-1 ")");
+ predicate_args = (predicate_args " " becode[betype[$i]]);
+ if (function_type)
+ function_type = (function_type ", " bespell[betype[$i]]);
+ else
+ function_type = bespell[betype[$i]];
+ }
+ }
+ constraints = (transform "@");
+ # Check the syntax of the ops file.
+ if ($i != "=" || NF > i+PREDICATE || NF < i+CONFIGURED) no_equal(i,$0);
+ if (!betype[$(i+RESULT)]) no_type($(i+RESULT));
+ # Incorporate the result type.
+ if (i == 2) {
+ predicate = "_void";
+ function_type = "void";
+ }
+ signature = ($(i+SPECIFIC) signature);
+ predicate = sprintf("is_%s_func%s", betype[$(i+RESULT)], predicate);
+ predicate_args = (becode[betype[$(i+RESULT)]] predicate_args);
+ function_type = sprintf("(%s (*)(%s))", bespell[betype[$(i+RESULT)]], \
+ function_type);
+ if (substr(code[$(i+RESULT)], 1, 1) == "j") {
+ # Handle a jump asm. The code is expected to be
+ # j={cc-bit-num}={cc-bit-value}[={r|d}]. The operation must have
+ # one operand if the code d is used and two operands otherwise.
+ # The transform function can implement the r code by reversing the
+ # two operands. In all cases, the first operand is a computed
+ # constant encoding both the bit number and the test.
+ n = split(code[$(i+RESULT)], jmp, "=");
+ if (jmp[n] == "d" && i != 3) error("%d operands", i-2);
+ if (jmp[n] != "d" && i != 4) error("%d operands", i-2);
+ if (jmp[n] == "r")
+ transform_args = ", ND_kid(t,2), ND_kid(t,1)";
+ transform_args = sprintf("%s(OP_VCMP%s%s", tree[i-2], \
+ toupper(jmp[3]), transform_args);
+ if (jmp[n] == "r")
+ transform = ("r" transform);
+ insn_code = sprintf("CODE_FOR_j_%d_%s_f%s", jmp[2], jmp[3], \
+ transform);
+ transform = sprintf("transform_j_%d_%s_f%s", jmp[2], jmp[3], \
+ transform);
+ } else {
+ transform_args = sprintf("%s(OP_%sASM%s%s", tree[i-2], \
+ toupper(code[$(i+RESULT)]), \
+ toupper(transform), transform_args);
+ insn_code = sprintf("CODE_FOR_%sf%s", code[$(i+RESULT)], transform);
+ transform = sprintf("transform_%sf%s", code[$(i+RESULT)], transform);
+ }
+ # Give a unique id to the signature
+ if (count[signature] == 0)
+ count[signature] = ++uid[$(i+SPECIFIC)];
+
+ # Compute the default instruction name
+ nf = split($(i+SPECIFIC), part, "_");
+ instruction = ("MOP_" part[nf]);
+
+ # Compute the insn_code, but use the instruction override if given.
+ if (NF >= i+INSTRUCTION)
+ instruction = $(i+INSTRUCTION);
+ if (insn_type[instruction])
+ insn_code = (insn_code "_" insn_type[instruction]);
+
+ # Allow the user to override the addressibility, instruction,
+ # const_ptr_ok, volatile_ptr_ok, transform, and predicate.
+ if (NF >= i+ADDRESSIBLE)
+ addressible = "";
+ else
+ addressible = "FALSE";
+
+ if (NF >= i+INSTRUCTION)
+ instruction = "";
+ else if (substr($1, 1, 4) == "vec_")
+ print "@ @3", instruction;
+
+ if (NF >= i+CONST_PTR_OK)
+ const_ptr_ok = "";
+ else
+ const_ptr_ok = "FALSE";
+
+ if (NF >= i+VOLATILE_PTR_OK)
+ volatile_ptr_ok = "";
+ else
+ volatile_ptr_ok = "FALSE";
+
+ if (NF >= i+TRANSFORM)
+ transform = "";
+ else
+ print "@ @1", transform, transform_args;
+
+ if (NF >= i+PREDICATE)
+ predicate = "";
+ else
+ print "@ @2", i-2, predicate, predicate_args, function_type;
+
+ if (optimize[$1])
+ optimize_method = optimize[$1];
+ else
+ optimize_method = "0";
+
+ # Record the line, addressibility, instruction, transform,
+ # predicate, and unique id.
+ line[n_lines++] = ($0 " " addressible " " instruction " " \
+ const_ptr_ok " " volatile_ptr_ok " " transform " " \
+ predicate " " insn_code " " constraints " " \
+ optimize_method " " count[signature]);
+ }
+ END {
+ if (status) exit;
+ # generic op1 op2 ... opn = result specific when configured
+ # addressable instruction const_ptr_ok volatile_ptr_ok
+ # transform predicate insn_code constraints optimize uid
+ SPECIFIC = 12
+ for (i = 1; i < n_lines; i++) {
+ nf = split(line[i], part);
+ specific = part[nf-SPECIFIC];
+
+ # Print the generic form.
+ printf "%s", part[1];
+ for (j = 2; j <= nf-SPECIFIC; j++) printf " %s", part[j];
+ if (uid[specific] > 1) printf ":%d", part[nf];
+ while (j < nf) printf " %s", part[j++];
+ printf "\n";
+
+ # Print the specific form.
+ printf "%s", specific;
+ for (j = 2; j <= nf-SPECIFIC; j++) printf " %s", part[j];
+ if (uid[specific] > 1) printf ":%d", part[nf];
+ while (j < nf) printf " %s", part[j++];
+ printf "\n";
+ }
+ }' | \
+
+# Strip out load and store qualifiers.
+sed -e 's/_load_op//g' -e 's/_store_op//g' | \
+
+# Sort the processed file and eliminate duplicates.
+sort -u | \
+
+# Append the count of each generic function to each line.
+awk 'function push() {
+ if (num)
+ for (i = 0; i < num; i++)
+ print line[i], num;
+ num = 0;
+ }
+ $1 == "@" {
+ print $0;
+ }
+ $1 != "@" {
+ if (last != $1)
+ push();
+ last = $1;
+ line[num++] = $0;
+ }
+ END {
+ push();
+ }' | \
+
+# Now compute the gperf input file.
+# Lines now have a fixed format
+# generic op1 ... opn = result specific instruction when configured
+# addressible const_ptr_ok volatile_ptr_ok transform predicate
+# insn_code constraints optimize count
+awk 'BEGIN {
+ MCC = '$MCC'
+ GCC = '$GCC'
+ i = 0;
+ COUNT = i++
+ OPTIMIZE = i++
+ CONSTRAINTS = i++
+ INSN_CODE = i++
+ PREDICATE = i++
+ TRANSFORM = i++
+ VOLATILE_PTR_OK = i++
+ CONST_PTR_OK = i++
+ INSTRUCTION = i++
+ ADDRESSIBLE = i++
+ CONFIGURED = i++
+ WHEN = i++
+ SPECIFIC = i++
+ RESULT = i++
+ EQ = i++
+ OPN = i++
+ NARGS = i++
+ if (MCC) {
+ print "%{";
+ print "/* Command-line: '"$gperf"' */";
+ MAXARGS = 5
+ }
+ if (GCC)
+ MAXARGS = 3
+ }
+ function write_test(tree, type, num) {
+ if (type == "PTR") {
+ printf "\n && TY_kind(%s) == KIND_POINTER", tree;
+ } else if (type == "I5") {
+ printf "\n && is_integer_type(%s)", tree;
+ printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num;
+ printf "\n && ((UINT32)Targ_To_Host(tc) + 16) < 32";
+ } else if (type == "U5") {
+ printf "\n && is_integer_type(%s)", tree;
+ printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num;
+ printf "\n && (UINT32)Targ_To_Host(tc) < 32";
+ } else if (type == "U4") {
+ printf "\n && is_integer_type(%s)", tree;
+ printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num;
+ printf "\n && (UINT32)Targ_To_Host(tc) < 16";
+ } else if (type == "U2") {
+ printf "\n && is_integer_type(%s)", tree;
+ printf "\n && Is_Const(ND_kid0(ND_kid(t,%d)), &tc)", num;
+ printf "\n && (UINT32)Targ_To_Host(tc) < 4";
+ } else if (type == "BETYPE_U4" || type == "BETYPE_I4") {
+ printf "\n && is_integer_type(%s)", tree;
+ } else {
+ printf "\n && Similar_Types(%s,", tree;
+ printf "\n\t\t Be_Type_Tbl(%s), IGNORE_QUALIFIERS)", type;
+ }
+ }
+ $1 == "@" {
+ if (MCC) {
+ if ($2 == "@1") {
+ # Write the predicate function from the given parameters.
+ # The format is:
+ # @ @1 transform_ifii Make_3tree(OP_IASMII, ND_kid(t,1), ND_kid(t,2)
+ print "";
+ print "/*ARGSUSED*/";
+ print "static void";
+ print $3 "(ND *func, ND *parent, ND *t, struct builtin *self)";
+ print "{";
+ printf " *t = *%s", $4;
+ for (i = 5; i <= NF; i++) printf " %s", $i;
+ print ",";
+ if (split($3,jmp,"_") == 5 && jmp[2] == "j")
+ printf "\t\t MK_I4CONST_ND((self->data << 5) + %d));\n", \
+ jmp[3];
+ else
+ print "\t\t MK_I4CONST_ND(self->data));";
+
+ print " Is_True(self->data > 0, (\"No implementation for %s\", self->name));";
+ print "}";
+ } else if ($2 == "@2") {
+ # Write the transform function from the given parameters.
+ # The format is:
+ # @ @2 2 is_int_func_int_int BETYPE_I4 BETYPE_I4 BETYPE_I4
+ # (int (*)(int, int))
+ print "";
+ print "/*ARGSUSED*/";
+ print "static BOOL";
+ print $4 "(ND *func, ND *parent, ND *t, struct builtin *self)";
+ print "{";
+ print " TCON tc;";
+ printf " if (ND_nkids(t) == %d", $3+1;
+ write_test("ST_type(ND_dec(func))", $5, "");
+ for (i = 1; i <= $3; i++) {
+ printf "\n && ND_name(ND_kid(t,%d)) == TO_VAL", i;
+ write_test(sprintf("The_Tree_Type(ND_kid(t,%d))", i), $(i+5), i);
+ }
+ print ")";
+ print " return TRUE;";
+ print " Error_Prt_Line (ND_linenum(t), ec_builtin_function_type, self->name,";
+ i = $3+6;
+ printf "\t\t \"%s", $i;
+ while (++i <= NF) printf " %s", $i;
+ print "\");";
+ print " return FALSE;";
+ print "}";
+ } else if ($2 == "@3") {
+ if (once++ == 0) printf "\n#ifndef HAVE_ALTIVEC\n";
+ printf "#define %s -1\n", $3;
+ } else {
+ if (once && twice++ == 0) printf "#endif /* HAVE_ALTIVEC */\n\n";
+ printf "extern struct a_type *T_%s;\n", $2;
+ }
+ }
+ next;
+ }
+ $1 == "%" {
+ print $0;
+ status = 1;
+ exit;
+ }
+ {
+ # Compute the signature of the generic function.
+ signature=$1;
+ for (i = 2; i <= NF-OPN; i++) {
+ if ($i != "...")
+ signature=(signature " " $i);
+ }
+
+ # Ensure that the signature is unique.
+ if (signature_line[signature]) {
+ print "Ambiguous signatures:";
+ print $0;
+ print line[signature_line[signature]];
+ }
+ signature_line[signature] = n_lines;
+
+ # Require that overloaded functions have the same attributes:
+ # number of arguments, when, configured, and addressible.
+ if (same_arg_count[$1] && same_arg_count[$1] != NF)
+ printf "%% number of arguments for %s varies: %d and %d\n", \
+ $1, NF-NARGS, same_arg_count[$1]-NARGS;
+ same_arg_count[$1] = NF;
+
+ if (same_when[$1] && same_when[$1] != $(NF-WHEN))
+ printf "%% when for %s varies: %s and %s\n", \
+ $1, $(NF-WHEN), same_when[$1];
+ same_when[$1] = $(NF-WHEN);
+
+ if (same_configured[$1] && same_configured[$1] != $(NF-CONFIGURED))
+ printf "%% configured for %s varies: %s and %s\n", \
+ $1, $(NF-CONFIGURED), same_configured[$1];
+ same_configured[$1] = $(NF-CONFIGURED);
+
+ if (same_addressible[$1] && same_addressible[$1] != $(NF-ADDRESSIBLE))
+ printf "%% addressible for %s varies: %s and %s\n", \
+ $1, $(NF-ADDRESSIBLE), same_addressible[$1];
+ else if (same_addressible[$1] && same_addressible[$1] != "FALSE")
+ printf "%% Overloaded function %s is addressible\n", $1
+ same_addressible[$1] = $(NF-ADDRESSIBLE);
+
+ # Record the line.
+ line[n_lines++] = $0;
+ }
+ function push(fcn, n) {
+ if (last) printf "};\n";
+ # Gcc3: declare as arrays of const pointers
+ if (fcn) printf "static const struct builtin *const O_%s[%d] = {\n", fcn, n;
+ last = fcn;
+ }
+ function mangle(name) {
+ if (split(name, names, ":") == 1)
+ return ("B_" names[1]);
+ return ("B" names[2] "_" names[1]);
+ }
+ END {
+ if (status) exit;
+
+ # Gcc3: Mark file as Apple local
+ printf "/* APPLE LOCAL file AltiVec */\n";
+ printf "/* This file is generated by ops-to-gp. Do not edit. */\n\n";
+ printf "/* To regenerate execute:\n";
+ printf " ops-to-gp -gcc vec.ops builtin.ops\n";
+ printf " with the current directory being gcc/config/rs6000. */\n\n";
+
+ # Output the description of each specific function.
+ uid = 0;
+ if (MCC) print "";
+ for (i = 0; i < n_lines; i++) {
+ nf = split(line[i], part);
+ fcn = part[nf-SPECIFIC];
+ if (!done[fcn]) {
+ printf "static const struct builtin %s = {", mangle(fcn);
+ if (GCC) printf " {";
+ ellipsis = 1;
+ for (j = 2; j <= nf-OPN; j++)
+ if (part[j] != "...") {
+ printf " &T_%s,", part[j];
+ } else {
+ ellipsis = -1;
+ printf " NULL,";
+ }
+ while (j++ <= MAXARGS+1)
+ printf " NULL,";
+ instruction = part[nf-INSTRUCTION];
+ if (substr(instruction, 1, 4) == "MOP_")
+ instruction = substr(instruction, 5);
+ if (substr(instruction, length(instruction)) == "D")
+ instruction = (substr(instruction, 1, length(instruction) - 1) ".");
+ # Gcc3: Prefix each specific instruction with a "*"
+ if (match (instruction, "^[a-zA-Z]") > 0)
+ instruction = "*" instruction;
+ if (GCC) printf " },";
+ if (GCC) printf " \"%s\",", substr(part[nf-CONSTRAINTS], 1, length(part[nf-CONSTRAINTS]) - 1);
+ printf " &T_%s,", part[nf-RESULT];
+ if (MCC) printf " \"%s\",", part[nf-SPECIFIC];
+ printf " %d,", ellipsis * (nf - NARGS);
+ if (MCC) {
+ printf " %s,", part[nf-WHEN];
+ printf " %s,", part[nf-ADDRESSIBLE];
+ printf " %s,", part[nf-CONST_PTR_OK];
+ printf " %s,", part[nf-VOLATILE_PTR_OK];
+ printf " %s,", part[nf-CONFIGURED];
+ printf " %s,", part[nf-INSTRUCTION];
+ printf " %s,", part[nf-TRANSFORM];
+ printf " %s", part[nf-PREDICATE];
+ } else if (GCC) {
+ printf " %s,", part[nf-CONST_PTR_OK];
+ printf " %s,", part[nf-VOLATILE_PTR_OK];
+ printf " %s,", part[nf-OPTIMIZE];
+ printf " \"%s\",", part[nf-SPECIFIC];
+ printf " \"%s\",", instruction;
+ printf " %s,", part[nf-INSN_CODE];
+ printf " B_UID(%d)", uid++;
+ }
+ printf " };\n";
+ }
+ done[fcn] = 1;
+ }
+
+ if (GCC) printf "#define LAST_B_UID B_UID(%d)\n", uid;
+
+ if (GCC) {
+ # Output the description of each specific function.
+ print "";
+ uid = 0;
+ for (i in done)
+ done[i] = "";
+ print "const struct builtin * const Builtin[] = {"
+ for (i = 0; i < n_lines; i++) {
+ nf = split(line[i], part);
+ fcn = part[nf-SPECIFIC];
+ if (!done[fcn]) {
+ printf " &%s,\n", mangle(fcn);
+ }
+ done[fcn] = 1;
+ }
+ print "};"
+ }
+
+ # Output the overload tables for each generic function.
+ print "";
+ for (i = 0; i < n_lines; i++) {
+ nf = split(line[i], part);
+ fcn = part[1];
+ if (last != fcn)
+ push(fcn, part[nf]);
+ printf " &%s,\n", mangle(part[nf-SPECIFIC]);
+ }
+ push("", 0);
+
+ # Output the builtin function structure.
+ print "";
+ if (MCC) {
+ print "%}";
+ print "struct overloadx {";
+ print " char *name;";
+ print " int fcns;";
+ print " int args;";
+ print " struct builtin **functions;";
+ print "};";
+ print "%%";
+ } else if (GCC) {
+ print "const struct overloadx Overload[] = {";
+ }
+
+ # Output the builtin function list and data.
+ uid = 0;
+ for (i = 0; i < n_lines; i++) {
+ nf = split(line[i], part);
+ fcn = part[1];
+ args = nf - NARGS;
+ if (part[nf-OPN] == "...") args = -args;
+ if (last != fcn) {
+ if (MCC) printf "%s, %d, %d, O_%s\n", fcn, part[nf], args, fcn;
+ if (GCC) printf " { \"%s\", %d, %d, O_%s, O_UID(%d) },\n", \
+ fcn, part[nf], args, fcn, uid++;
+ }
+ last = fcn;
+ }
+
+ if (GCC) {
+ print " { NULL, 0, 0, NULL, 0 }"
+ print "};";
+
+ printf "#define LAST_O_UID O_UID(%d)\n", uid;
+ }
+
+ }' > $output.$suffix
+
+if [ "$MCC" = "1" ] ; then
+ $gperf > $output.h
+fi
diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c
index 5d36d5d28b3..b48e4b019e7 100644
--- a/gcc/config/rs6000/rs6000-c.c
+++ b/gcc/config/rs6000/rs6000-c.c
@@ -30,6 +30,11 @@
#include "c-pragma.h"
#include "errors.h"
#include "tm_p.h"
+/* APPLE LOCAL begin AltiVec */
+#include "c-common.h"
+#include "cpplib.h"
+#include "target.h"
+/* APPLE LOCAL end AltiVec */
/* Handle the machine specific pragma longcall. Its syntax is
@@ -78,6 +83,98 @@ rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
#define builtin_define(TXT) cpp_define (pfile, TXT)
#define builtin_assert(TXT) cpp_assert (pfile, TXT)
+/* APPLE LOCAL begin AltiVec */
+/* Keep the AltiVec keywords handy for fast comparisons. */
+static GTY(()) cpp_hashnode *__vector_keyword;
+static GTY(()) cpp_hashnode *vector_keyword;
+static GTY(()) cpp_hashnode *__pixel_keyword;
+static GTY(()) cpp_hashnode *pixel_keyword;
+static GTY(()) cpp_hashnode *__bool_keyword;
+static GTY(()) cpp_hashnode *bool_keyword;
+static GTY(()) cpp_hashnode *_Bool_keyword;
+
+/* Called to decide whether a conditional macro should be expanded. */
+
+bool
+rs6000_expand_macro_p (const cpp_token *tok)
+{
+ static bool expand_bool_pixel = 0;
+ bool expand_this = 0;
+ const cpp_hashnode *ident = tok->val.node;
+
+ if (ident == vector_keyword)
+ {
+ tok = c_lex_peek (0);
+ if (tok->type == CPP_NAME)
+ {
+ ident = tok->val.node;
+ if (ident == pixel_keyword || ident == __pixel_keyword
+ || ident == bool_keyword || ident == __bool_keyword
+ || ident == _Bool_keyword)
+ expand_this = expand_bool_pixel = 1;
+ else
+ {
+ enum rid rid_code = (enum rid)(ident->rid_code);
+
+ if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
+ || rid_code == RID_SHORT || rid_code == RID_SIGNED
+ || rid_code == RID_INT || rid_code == RID_CHAR
+ || rid_code == RID_FLOAT)
+ {
+ expand_this = 1;
+ /* If the next keyword is bool or pixel, it
+ will need to be expanded as well. */
+ tok = c_lex_peek (1);
+ if (tok->type == CPP_NAME)
+ {
+ ident = tok->val.node;
+ if (ident == pixel_keyword || ident == __pixel_keyword
+ || ident == bool_keyword || ident == __bool_keyword
+ || ident == _Bool_keyword)
+ expand_bool_pixel = 1;
+ }
+ }
+ }
+ }
+ }
+ else if (ident == pixel_keyword || ident == bool_keyword
+ || ident == _Bool_keyword)
+ {
+ if (expand_bool_pixel)
+ {
+ expand_this = 1;
+ expand_bool_pixel = 0;
+ }
+ }
+
+ return expand_this;
+}
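+
+/* For example (informal): in "vector unsigned int vi;" the conditional
+   macro "vector" is expanded because the next token is a type keyword,
+   and a "bool" or "pixel" that immediately follows an expanded "vector"
+   is expanded on the next call via the static expand_bool_pixel flag;
+   in "int vector = 1;" the identifier "vector" is left unexpanded.  */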
+
+static void
+cb_define_conditional_macro (cpp_reader *pfile ATTRIBUTE_UNUSED,
+ unsigned int n ATTRIBUTE_UNUSED,
+ cpp_hashnode *node) {
+ const unsigned char *name = node->ident.str;
+ bool underscore = (name[1] == '_');
+ char kwd = (underscore ? name[2] : name[0]);
+ cpp_hashnode **kwd_node = 0;
+
+ if (!underscore) /* macros without two leading underscores */
+ node->flags |= NODE_DISABLED; /* shall be conditional */
+
+ switch (kwd)
+ {
+ case 'v': kwd_node = (underscore ? &__vector_keyword : &vector_keyword); break;
+ case 'p': kwd_node = (underscore ? &__pixel_keyword : &pixel_keyword); break;
+ case 'b': kwd_node = (underscore ? &__bool_keyword : &bool_keyword); break;
+ case '_': kwd_node = &_Bool_keyword; break;
+ default: abort ();
+ }
+ *kwd_node = node;
+}
+
+/* APPLE LOCAL end AltiVec */
+
void
rs6000_cpu_cpp_builtins (cpp_reader *pfile)
{
@@ -93,13 +190,39 @@ rs6000_cpu_cpp_builtins (cpp_reader *pfile)
builtin_define ("_ARCH_COM");
if (TARGET_ALTIVEC)
{
+ /* APPLE LOCAL begin AltiVec */
+ struct cpp_callbacks *cb = cpp_get_callbacks (pfile);
+ void (*old_cb_define) (cpp_reader *, unsigned int, cpp_hashnode *)
+ = cb->define;
+ /* APPLE LOCAL end AltiVec */
+
builtin_define ("__ALTIVEC__");
builtin_define ("__VEC__=10206");
/* Define the AltiVec syntactic elements. */
+
+ /* APPLE LOCAL AltiVec */
+ cb->define = cb_define_conditional_macro;
+
builtin_define ("__vector=__attribute__((altivec(vector__)))");
builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");
+
+ /* APPLE LOCAL begin AltiVec */
+ /* Keywords without two leading underscores are context-sensitive, and hence
+ implemented as conditional macros, controlled by the rs6000_expand_macro_p()
+ predicate above. */
+ builtin_define ("vector=__attribute__((altivec(vector__)))");
+ builtin_define ("pixel=__attribute__((altivec(pixel__))) unsigned short");
+ builtin_define ("bool=__attribute__((altivec(bool__))) unsigned");
+ builtin_define ("_Bool=__attribute__((altivec(bool__))) unsigned");
+ cb->define = old_cb_define;
+
+ /* Enable context-sensitive macros. */
+ targetm.expand_macro_p = rs6000_expand_macro_p;
+ /* Enable '(vector signed int)(a, b, c, d)' vector literal notation. */
+ targetm.cast_expr_as_vector_init = true;
+ /* APPLE LOCAL end AltiVec */
}
if (TARGET_SPE)
builtin_define ("__SPE__");
diff --git a/gcc/config/rs6000/rs6000-protos.h b/gcc/config/rs6000/rs6000-protos.h
index 1121e309db9..accb6087d28 100644
--- a/gcc/config/rs6000/rs6000-protos.h
+++ b/gcc/config/rs6000/rs6000-protos.h
@@ -29,6 +29,7 @@
#ifdef TREE_CODE
extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, int, int, int);
+
extern void rs6000_va_start (tree, rtx);
#endif /* TREE_CODE */
@@ -127,10 +128,13 @@ extern int mfcr_operation (rtx, enum machine_mode);
extern int mtcrf_operation (rtx, enum machine_mode);
extern int lmw_operation (rtx, enum machine_mode);
extern struct rtx_def *create_TOC_reference (rtx);
+/* APPLE LOCAL RTX_COST for multiply */
+extern int rs6000_rtx_mult_cost (rtx);
extern void rs6000_split_multireg_move (rtx, rtx);
extern void rs6000_emit_move (rtx, rtx, enum machine_mode);
extern rtx rs6000_legitimize_address (rtx, rtx, enum machine_mode);
-extern rtx rs6000_legitimize_reload_address (rtx, enum machine_mode,
+/* APPLE LOCAL pass reload addr by address */
+extern rtx rs6000_legitimize_reload_address (rtx *, enum machine_mode,
int, int, int, int *);
extern int rs6000_legitimate_address (enum machine_mode, rtx, int);
extern bool rs6000_mode_dependent_address (rtx);
@@ -204,8 +208,13 @@ extern int rs6000_tls_symbol_ref (rtx, enum machine_mode);
extern void rs6000_pragma_longcall (struct cpp_reader *);
extern void rs6000_cpu_cpp_builtins (struct cpp_reader *);
+/* APPLE LOCAL AltiVec */
+extern bool rs6000_expand_macro_p (const struct cpp_token *);
+
#if TARGET_MACHO
-char *output_call (rtx, rtx *, int, int);
+void add_compiler_stub PARAMS ((tree, tree, int));
+void output_compiler_stub PARAMS ((void));
+extern char* output_call PARAMS ((rtx, rtx *, int, int));
#endif
#endif /* rs6000-protos.h */
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 73b912cf930..27f0175b51e 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -50,12 +50,20 @@
#include "target-def.h"
#include "langhooks.h"
#include "reload.h"
+/* APPLE LOCAL why is this needed? */
+#include "insn-addr.h"
#include "cfglayout.h"
#include "sched-int.h"
#if TARGET_XCOFF
#include "xcoffout.h" /* get declarations of xcoff_*_section_name */
#endif
+/* APPLE LOCAL begin Macintosh alignment */
+#ifndef TARGET_ALIGN_MAC68K
+#define TARGET_ALIGN_MAC68K 0
+#endif
+/* APPLE LOCAL end Macintosh alignment */
+
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif
@@ -216,9 +224,16 @@ int rs6000_debug_arg; /* debug argument handling */
static GTY(()) tree opaque_V2SI_type_node;
static GTY(()) tree opaque_V2SF_type_node;
static GTY(()) tree opaque_p_V2SI_type_node;
-
-/* AltiVec requires a few more basic types in addition to the vector
- types already defined in tree.c. */
+static GTY(()) tree V16QI_type_node;
+static GTY(()) tree V2SI_type_node;
+static GTY(()) tree V2SF_type_node;
+static GTY(()) tree V4HI_type_node;
+static GTY(()) tree V4SI_type_node;
+static GTY(()) tree V4SF_type_node;
+static GTY(()) tree V8HI_type_node;
+static GTY(()) tree unsigned_V16QI_type_node;
+static GTY(()) tree unsigned_V8HI_type_node;
+static GTY(()) tree unsigned_V4SI_type_node;
static GTY(()) tree bool_char_type_node; /* __bool char */
static GTY(()) tree bool_short_type_node; /* __bool short */
static GTY(()) tree bool_int_type_node; /* __bool int */
@@ -302,6 +317,7 @@ static void rs6000_assemble_visibility (tree, int);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
+static const char *rs6000_mangle_fundamental_type (tree);
extern const struct attribute_spec rs6000_attribute_table[];
static void rs6000_set_default_type_attributes (tree);
static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT);
@@ -410,15 +426,15 @@ static rtx rs6000_spe_function_arg (CUMULATIVE_ARGS *,
enum machine_mode, tree);
static rtx rs6000_mixed_function_arg (CUMULATIVE_ARGS *,
enum machine_mode, tree, int);
-static void rs6000_move_block_from_reg(int regno, rtx x, int nregs);
+static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
static void setup_incoming_varargs (CUMULATIVE_ARGS *,
enum machine_mode, tree,
int *, int);
#if TARGET_MACHO
static void macho_branch_islands (void);
static void add_compiler_branch_island (tree, tree, int);
-static int no_previous_def (tree function_name);
-static tree get_prev_label (tree function_name);
+static int no_previous_def (tree);
+static tree get_prev_label (tree);
#endif
static tree rs6000_build_builtin_va_list (void);
@@ -575,6 +591,9 @@ static const char alt_reg_names[][8] =
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
+#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
+#define TARGET_MANGLE_FUNDAMENTAL_TYPE rs6000_mangle_fundamental_type
+
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
@@ -636,6 +655,9 @@ rs6000_override_options (const char *default_cpu)
size_t i, j;
struct rs6000_cpu_select *ptr;
int set_masks;
+/* APPLE LOCAL begin -fast */
+ enum processor_type mcpu_cpu;
+/* APPLE LOCAL end -fast */
/* Simplifications for entries below. */
@@ -746,6 +768,20 @@ rs6000_override_options (const char *default_cpu)
rs6000_select[0].string = default_cpu;
rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
+ /* APPLE LOCAL begin -fast */
+ if (flag_fast || flag_fastf || flag_fastcp)
+ {
+ mcpu_cpu = PROCESSOR_POWER4;
+ if (rs6000_select[1].string == (char *)0 && rs6000_select[2].string == (char *)0)
+ {
+ /* -mcpu and -mtune unspecified. Assume both are G5 */
+ set_target_switch ("tune=G5");
+ set_target_switch ("cpu=G5");
+ }
+ }
+ /* APPLE LOCAL end -fast */
+
+
for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
{
ptr = &rs6000_select[i];
@@ -762,6 +798,9 @@ rs6000_override_options (const char *default_cpu)
target_flags &= ~set_masks;
target_flags |= (processor_target_table[j].target_enable
& set_masks);
+ /* APPLE LOCAL begin -fast */
+ mcpu_cpu = processor_target_table[j].processor;
+ /* APPLE LOCAL end -fast */
}
break;
}
@@ -771,6 +810,48 @@ rs6000_override_options (const char *default_cpu)
}
}
+ /* APPLE LOCAL begin -fast */
+ if (flag_fast || flag_fastf || flag_fastcp)
+ {
+ flag_gcse_sm = 1;
+ flag_loop_transpose = 1;
+ rs6000_sched_insert_nops = sched_finish_regroup_exact;
+ flag_unroll_loops = 1;
+ flag_strict_aliasing = 1;
+ flag_schedule_interblock = 1;
+ align_jumps_max_skip = 15;
+ align_loops_max_skip = 15;
+ align_functions = 16;
+ align_loops = 16;
+ align_jumps = 16;
+ set_fast_math_flags (1);
+ flag_reorder_blocks = 1;
+#if 0
+ if (flag_branch_probabilities)
+ flag_reorder_blocks_and_partition = 1;
+#endif
+ if (!flag_pic)
+ set_target_switch ("dynamic-no-pic");
+
+ if (mcpu_cpu == PROCESSOR_POWER4)
+ {
+ set_target_switch ("powerpc-gpopt");
+ set_target_switch ("powerpc64");
+ }
+ if (flag_fast || flag_fastcp)
+ /* This doesn't work with NAG Fortran output. The gcc 3.5 C++ libraries
+ have been adjusted so that it now works with them. */
+ set_target_switch ("align-natural");
+ if (flag_fastf)
+ /* This applies Fortran argument semantics; for NAG Fortran output only. */
+ flag_argument_noalias = 2;
+ /* IMI flags */
+ disable_typechecking_for_spec_flag = 1;
+ flag_unit_at_a_time = 1;
+ }
+ /* APPLE LOCAL end -fast */
+
+
if (TARGET_E500)
rs6000_isel = 1;
@@ -1070,6 +1151,10 @@ rs6000_parse_alignment_option (void)
{
if (rs6000_alignment_string == 0)
return;
+/* APPLE LOCAL begin Macintosh alignment 2002-2-26 ff */
+ else if (! strcmp (rs6000_alignment_string, "mac68k"))
+ rs6000_alignment_flags = MASK_ALIGN_MAC68K;
+/* APPLE LOCAL end Macintosh alignment 2002-2-26 ff */
else if (! strcmp (rs6000_alignment_string, "power"))
rs6000_alignment_flags = MASK_ALIGN_POWER;
else if (! strcmp (rs6000_alignment_string, "natural"))
@@ -1099,6 +1184,22 @@ rs6000_parse_tls_size_option (void)
void
optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
{
+ /* APPLE LOCAL begin tweak default optimizations */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ /* Turn these on only if specifically requested, not with -O* */
+ /* Strict aliasing breaks too much existing code */
+ flag_strict_aliasing = 0;
+ /* Block reordering causes code bloat, and very little speedup */
+ flag_reorder_blocks = 0;
+ /* Multi-basic-block scheduling loses badly when the compiler
+ misguesses which blocks are going to be executed, more than
+ it gains when it guesses correctly. Its guesses for cases
+ where interblock scheduling occurs (if-then-else's) are
+ little better than random, so disable this unless requested. */
+ flag_schedule_interblock = 0;
+ }
+ /* APPLE LOCAL end tweak default optimizations */
}
/* Do anything needed at the start of the asm file. */
@@ -2271,6 +2372,11 @@ call_operand (rtx op, enum machine_mode mode)
return 0;
return (GET_CODE (op) == SYMBOL_REF
+ /* APPLE LOCAL begin accept hard R12 as target reg */
+#ifdef MAGIC_INDIRECT_CALL_REG
+ || (GET_CODE (op) == REG && REGNO (op) == MAGIC_INDIRECT_CALL_REG)
+#endif
+ /* APPLE LOCAL end accept hard R12 as target reg */
|| (GET_CODE (op) == REG
&& (REGNO (op) == LINK_REGISTER_REGNUM
|| REGNO (op) == COUNT_REGISTER_REGNUM
@@ -2356,7 +2462,7 @@ rs6000_special_round_type_align (tree type, int computed, int specified)
tree field = TYPE_FIELDS (type);
/* Skip all the static variables only if ABI is greater than
- 1 or equal to 0. */
+ 1 or equal to 0. */
while (field != NULL && TREE_CODE (field) == VAR_DECL)
field = TREE_CHAIN (field);
@@ -3050,9 +3156,12 @@ rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
The Darwin code is inside #if TARGET_MACHO because only then is
machopic_function_base_name() defined. */
rtx
-rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
+/* APPLE LOCAL pass reload addr by address */
+rs6000_legitimize_reload_address (rtx *addr_x, enum machine_mode mode,
int opnum, int type, int ind_levels ATTRIBUTE_UNUSED, int *win)
{
+ /* APPLE LOCAL pass reload addr by address */
+ rtx x = *addr_x;
/* We must recognize output that we have already generated ourselves. */
if (GET_CODE (x) == PLUS
&& GET_CODE (XEXP (x, 0)) == PLUS
@@ -3408,6 +3517,57 @@ rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
return dest;
}
+/* APPLE LOCAL begin RTX_COST for multiply */
+int
+rs6000_rtx_mult_cost (rtx x)
+{
+ switch (rs6000_cpu)
+ {
+ case PROCESSOR_RIOS1:
+ case PROCESSOR_PPC405:
+ return (GET_CODE (XEXP (x, 1)) != CONST_INT
+ ? COSTS_N_INSNS (5)
+ : INTVAL (XEXP (x, 1)) >= -256 && INTVAL (XEXP (x, 1)) <= 255
+ ? COSTS_N_INSNS (3) : COSTS_N_INSNS (4));
+ case PROCESSOR_RS64A:
+ return (GET_CODE (XEXP (x, 1)) != CONST_INT
+ ? GET_MODE (XEXP (x, 1)) != DImode
+ ? COSTS_N_INSNS (20) : COSTS_N_INSNS (34)
+ : INTVAL (XEXP (x, 1)) >= -256 && INTVAL (XEXP (x, 1)) <= 255
+ ? COSTS_N_INSNS (8) : COSTS_N_INSNS (12));
+ case PROCESSOR_RIOS2:
+ case PROCESSOR_MPCCORE:
+ case PROCESSOR_PPC604e:
+ return COSTS_N_INSNS (2);
+ case PROCESSOR_PPC601:
+ return COSTS_N_INSNS (5);
+ case PROCESSOR_PPC603:
+ case PROCESSOR_PPC7400:
+ case PROCESSOR_PPC750:
+ return (GET_CODE (XEXP (x, 1)) != CONST_INT
+ ? COSTS_N_INSNS (5)
+ : INTVAL (XEXP (x, 1)) >= -256 && INTVAL (XEXP (x, 1)) <= 255
+ ? COSTS_N_INSNS (2) : COSTS_N_INSNS (3));
+ case PROCESSOR_PPC7450:
+ return (GET_CODE (XEXP (x, 1)) != CONST_INT
+ ? COSTS_N_INSNS (4)
+ : COSTS_N_INSNS (3));
+ case PROCESSOR_PPC403:
+ case PROCESSOR_PPC604:
+ return COSTS_N_INSNS (4);
+ case PROCESSOR_PPC620:
+ case PROCESSOR_PPC630:
+ return (GET_CODE (XEXP (x, 1)) != CONST_INT
+ ? GET_MODE (XEXP (x, 1)) != DImode
+ ? COSTS_N_INSNS (5) : COSTS_N_INSNS (7)
+ : INTVAL (XEXP (x, 1)) >= -256 && INTVAL (XEXP (x, 1)) <= 255
+ ? COSTS_N_INSNS (3) : COSTS_N_INSNS (4));
+ default:
+ abort ();
+ }
+}
+/* APPLE LOCAL end RTX_COST for multiply */
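+
+/* Note: the helper above is exported via rs6000-protos.h and is presumably
+   meant for the MULT case of the rtx cost computation; e.g. on the 7450 a
+   multiply by a constant is costed at 3 instruction-equivalents and a
+   register-register multiply at 4.  */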
+
/* Emit a move from SOURCE to DEST in mode MODE. */
void
rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
@@ -3845,6 +4005,8 @@ init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
/* Check for a longcall attribute. */
if (fntype
+ /* APPLE LOCAL long-branch */
+ && TARGET_LONG_BRANCH
&& lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
&& !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype)))
cum->call_cookie = CALL_LONG;
@@ -3953,6 +4115,7 @@ function_arg_boundary (enum machine_mode mode, tree type ATTRIBUTE_UNUSED)
else
return PARM_BOUNDARY;
}
+
/* Update the data in CUM to advance over an argument
of mode MODE and data type TYPE.
@@ -4224,7 +4387,49 @@ rs6000_mixed_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
k == 0 ? const0_rtx : GEN_INT (k*4));
return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k, rtlvec));
- }
+ }
+ else if (ALTIVEC_VECTOR_MODE(mode) && align_words <= (GP_ARG_NUM_REG - 1))
+ {
+ /* Varargs vector regs must be saved in R5-R8 or R9-R10. */
+ if (align_words == GP_ARG_NUM_REG - 2)
+ {
+ /* R9-R10 */
+ return gen_rtx_PARALLEL (mode,
+ gen_rtvec (3,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ NULL_RTX, const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_MIN_REG
+ + align_words),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_MIN_REG
+ + align_words+1),
+ GEN_INT(4))));
+ }
+ else
+ {
+ /* R5-R8 */
+ int k;
+ int size = int_size_in_bytes (type);
+ int no_units = ((size - 1) / 4) + 1;
+ int max_no_words = GP_ARG_NUM_REG - align_words;
+ int rtlvec_len = no_units < max_no_words ? no_units : max_no_words;
+ rtx *rtlvec = (rtx *) alloca (rtlvec_len * sizeof (rtx));
+ memset ((char *) rtlvec, 0, rtlvec_len * sizeof (rtx));
+
+ for (k=0; k < rtlvec_len; k++)
+ rtlvec[k] = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_MIN_REG
+ + align_words + k),
+ k == 0 ? const0_rtx : GEN_INT (k*4));
+
+ return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rtlvec));
+ }
+ }
return NULL_RTX;
}
@@ -4343,7 +4548,11 @@ function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
is either wholly in GPRs or half in GPRs and half not. */
part_mode = DImode;
- return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
+ if ((TARGET_32BIT && TARGET_POWERPC64)
+ || (align_words == GP_ARG_NUM_REG - 2))
+ return rs6000_mixed_function_arg (cum, part_mode, type, align_words);
+ else
+ return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
}
}
else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode))
@@ -5885,6 +6094,7 @@ altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
|| arg2 == error_mark_node)
return const0_rtx;
+ *expandedp = true;
STRIP_NOPS (arg2);
if (TREE_CODE (arg2) != INTEGER_CST
|| TREE_INT_CST_LOW (arg2) & ~0x3)
@@ -5902,7 +6112,6 @@ altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
if (pat != 0)
emit_insn (pat);
- *expandedp = true;
return NULL_RTX;
}
@@ -6456,6 +6665,18 @@ rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
static void
rs6000_init_builtins (void)
{
+ V2SI_type_node = build_vector_type (intSI_type_node, 2);
+ V2SF_type_node = build_vector_type (float_type_node, 2);
+ V4HI_type_node = build_vector_type (intHI_type_node, 4);
+ V4SI_type_node = build_vector_type (intSI_type_node, 4);
+ V4SF_type_node = build_vector_type (float_type_node, 4);
+ V8HI_type_node = build_vector_type (intHI_type_node, 8);
+ V16QI_type_node = build_vector_type (intQI_type_node, 16);
+
+ unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
+ unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
+ unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
+
opaque_V2SI_type_node = copy_node (V2SI_type_node);
opaque_V2SF_type_node = copy_node (V2SF_type_node);
opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
@@ -6486,10 +6707,10 @@ rs6000_init_builtins (void)
get_identifier ("__pixel"),
pixel_type_node));
- bool_V16QI_type_node = make_vector (V16QImode, bool_char_type_node, 1);
- bool_V8HI_type_node = make_vector (V8HImode, bool_short_type_node, 1);
- bool_V4SI_type_node = make_vector (V4SImode, bool_int_type_node, 1);
- pixel_V8HI_type_node = make_vector (V8HImode, pixel_type_node, 1);
+ bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
+ bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
+ bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
+ pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
(*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
get_identifier ("__vector unsigned char"),
@@ -8648,7 +8869,11 @@ rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
static struct machine_function *
rs6000_init_machine_status (void)
{
- return ggc_alloc_cleared (sizeof (machine_function));
+ /* APPLE LOCAL begin volatile pic base reg in leaves */
+ machine_function *mf = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
+ mf->substitute_pic_base_reg = -1;
+ return mf;
+ /* APPLE LOCAL end volatile pic base reg in leaves */
}
/* These macros test for integers and extract the low-order bits. */
@@ -9478,6 +9703,47 @@ print_operand_address (FILE *file, rtx x)
abort ();
}
+/* APPLE LOCAL begin weak import */
+static void
+find_weak_imports (rtx x)
+{
+ /* Patterns accepted here follow output_addr_const in final.c. */
+ switch ( GET_CODE (x))
+ {
+ case CONST:
+ case ZERO_EXTEND:
+ case SIGN_EXTEND:
+ case SUBREG:
+ find_weak_imports (XEXP (x, 0));
+ break;
+
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CODE_LABEL:
+ case LABEL_REF:
+ default:
+ break;
+
+ case PLUS:
+ case MINUS:
+ find_weak_imports (XEXP (x, 0));
+ find_weak_imports (XEXP (x, 1));
+ break;
+
+ case SYMBOL_REF:
+ if ( SYMBOL_REF_WEAK_IMPORT (x))
+ {
+ fprintf (asm_out_file, "\t.weak_reference ");
+ assemble_name (asm_out_file, XSTR (x, 0));
+ fprintf (asm_out_file, "\n");
+ /* Attempt to prevent multiple weak_reference directives. */
+ SYMBOL_REF_WEAK_IMPORT (x) = 0;
+ }
+ break;
+ }
+}
+/* APPLE LOCAL end weak import */
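
find_weak_imports recurses over the constant being assembled and emits one .weak_reference per SYMBOL_REF flagged as a weak import, clearing the flag so the directive is not repeated. At the source level the situation looks roughly like this (a hedged sketch; the symbol names are invented, the attribute is the Darwin weak_import one):

    /* foo lives in another dylib and may be absent at run time.  */
    extern int foo __attribute__ ((weak_import));

    /* Its address in static data produces a relocation against _foo; the
       hook above would emit ".weak_reference _foo" ahead of the .long so
       the linker and dyld treat the reference as weak.  */
    int *foo_ptr = &foo;
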
+
/* Target hook for assembling integer objects. The PowerPC version has
to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
is defined. It also needs to handle DI-mode objects on 64-bit
@@ -9498,6 +9764,9 @@ rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
if (TARGET_RELOCATABLE
&& !in_toc_section ()
&& !in_text_section ()
+ /* APPLE LOCAL begin hot/cold partitioning */
+ && !in_text_unlikely_section ()
+ /* APPLE LOCAL end hot/cold partitioning */
&& !recurse
&& GET_CODE (x) != CONST_INT
&& GET_CODE (x) != CONST_DOUBLE
@@ -9536,6 +9805,9 @@ rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
}
}
#endif /* RELOCATABLE_NEEDS_FIXUP */
+ /* APPLE LOCAL weak import */
+ if (DEFAULT_ABI == ABI_DARWIN)
+ find_weak_imports (x);
return default_assemble_integer (x, size, aligned_p);
}
@@ -10352,6 +10624,8 @@ first_reg_to_save (void)
#if TARGET_MACHO
if (flag_pic
&& current_function_uses_pic_offset_table
+ /* APPLE LOCAL volatile pic base reg in leaves */
+ && cfun->machine->substitute_pic_base_reg == -1
&& first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif
@@ -10540,7 +10814,7 @@ rs6000_stack_info (void)
{
static rs6000_stack_t info, zero_info;
rs6000_stack_t *info_ptr = &info;
- int reg_size = TARGET_POWERPC64 ? 8 : 4;
+ int reg_size = TARGET_32BIT ? 4 : 8;
int ehrd_size;
HOST_WIDE_INT total_raw_size;
@@ -10652,7 +10926,7 @@ rs6000_stack_info (void)
info_ptr->varargs_size = RS6000_VARARGS_AREA;
info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
info_ptr->parm_size = RS6000_ALIGN (current_function_outgoing_args_size,
- 8);
+ TARGET_ALTIVEC ? 16 : 8);
if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
info_ptr->spe_gp_size = 8 * (32 - info_ptr->first_gp_reg_save);
@@ -10689,7 +10963,7 @@ rs6000_stack_info (void)
/* Align stack so vector save area is on a quadword boundary. */
if (info_ptr->altivec_size != 0)
info_ptr->altivec_padding_size
- = 16 - (-info_ptr->vrsave_save_offset % 16);
+ = (16 - (-info_ptr->vrsave_save_offset % 16)) % 16;
else
info_ptr->altivec_padding_size = 0;
@@ -10768,10 +11042,10 @@ rs6000_stack_info (void)
+ ehrd_size
+ info_ptr->cr_size
+ info_ptr->lr_size
- + info_ptr->vrsave_size
+ /* APPLE LOCAL fix redundant add? */
+ info_ptr->toc_size,
- (TARGET_ALTIVEC_ABI || ABI_DARWIN)
- ? 16 : 8);
+ /* APPLE LOCAL darwin native */
+ (TARGET_ALTIVEC_ABI ? 16 : 8));
total_raw_size = (info_ptr->vars_size
+ info_ptr->parm_size
@@ -11055,6 +11329,9 @@ static bool
rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
tree type;
+ /* APPLE LOCAL -mlong-branch */
+ if (TARGET_LONG_BRANCH)
+ return 0;
if (decl)
{
if (TARGET_ALTIVEC_VRSAVE)
@@ -11669,6 +11946,131 @@ generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
return insn;
}
+/* APPLE LOCAL begin special ObjC method use of R12 */
+static int objc_method_using_pic = 0;
+
+/* Determine whether a name is an ObjC method. */
+static int name_encodes_objc_method_p (const char *piclabel_name)
+{
+ return (piclabel_name[0] == '*' && piclabel_name[1] == '"'
+ ? (piclabel_name[2] == 'L'
+ && (piclabel_name[3] == '+' || piclabel_name[3] == '-'))
+ : (piclabel_name[1] == 'L'
+ && (piclabel_name[2] == '+' || piclabel_name[2] == '-')));
+}
+/* APPLE LOCAL end special ObjC method use of R12 */
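
The predicate keys off the shape of Darwin picbase labels: an optional quote after the '*' prefix, then 'L' followed by the ObjC '+' (class) or '-' (instance) method marker. A self-contained copy for experimentation, with made-up label strings, could look like:

    #include <stdio.h>

    /* Stand-alone copy of the predicate above; the strings below are
       invented and only illustrate the accepted shapes.  */
    static int
    encodes_objc_method (const char *n)
    {
      return (n[0] == '*' && n[1] == '"')
             ? (n[2] == 'L' && (n[3] == '+' || n[3] == '-'))
             : (n[1] == 'L' && (n[2] == '+' || n[2] == '-'));
    }

    int
    main (void)
    {
      printf ("%d\n", encodes_objc_method ("*\"L+[NSString alloc]$pb\""));  /* 1 */
      printf ("%d\n", encodes_objc_method ("*L-[Foo bar]$pb"));             /* 1 */
      printf ("%d\n", encodes_objc_method ("*L_main$pb"));                  /* 0 */
      return 0;
    }
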
+
+/* APPLE LOCAL begin recompute PIC register use */
+/* Sometimes a function has references that require the PIC register,
+ but optimization removes them all. To catch this case
+ recompute current_function_uses_pic_offset_table here.
+ This may allow us to eliminate the prologue and epilogue. */
+
+static int
+recompute_PIC_register_use (void)
+{
+ if (DEFAULT_ABI == ABI_DARWIN
+ && flag_pic && current_function_uses_pic_offset_table
+ && !cfun->machine->ra_needs_full_frame)
+ {
+ rtx insn;
+ current_function_uses_pic_offset_table = 0;
+ push_topmost_sequence ();
+ for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
+ if ( reg_mentioned_p (pic_offset_table_rtx, insn))
+ {
+ current_function_uses_pic_offset_table = 1;
+ break;
+ }
+ pop_topmost_sequence ();
+ }
+ return 0;
+}
+/* APPLE LOCAL end recompute PIC register use */
+
+/* APPLE LOCAL begin volatile pic base reg in leaves */
+/* If this is a leaf function and we used any pic-based references,
+ see if there is an unused volatile reg we can use instead of R31.
+ If so set substitute_pic_base_reg to this reg, set its reg_ever_used
+ bit (to avoid confusing later calls to alloc_volatile_reg), and
+ make a pass through the existing RTL, substituting the new reg for
+ the old one wherever it appears.
+ Logically this is a void function; it is int so it can be used to
+ initialize a dummy variable, thus getting executed ahead of other
+ initializations. Technicolour yawn. */
+
+/* ALLOC_VOLATILE_REG allocates a volatile register AFTER all gcc
+ register allocations have been done; we use it to reserve an
+ unused reg for holding VRsave. Returns -1 in case of failure (all
+ volatile regs are in use.) */
+/* Note, this is called from both the prologue and epilogue code,
+ with the assumption that it will return the same result both
+ times! Since the register arrays are not changed in between
+ this is valid, if a bit fragile. */
+/* In future we may also use this to grab an unused volatile reg to
+ hold the PIC base reg in the event that the current function makes
+ no procedure calls; this was done in 2.95. */
+static int
+alloc_volatile_reg (void)
+{
+ if (current_function_is_leaf
+ && reload_completed
+ && !cfun->machine->ra_needs_full_frame)
+ {
+ int r;
+ for (r = 10; r >= 2; --r)
+ if (! fixed_regs[r] && ! regs_ever_live[r])
+ return r;
+ }
+
+ return -1; /* fail */
+}
+
+static int
+try_leaf_pic_optimization (void)
+{
+ if ( DEFAULT_ABI==ABI_DARWIN
+ && flag_pic && current_function_uses_pic_offset_table
+ && current_function_is_leaf
+ && !cfun->machine->ra_needs_full_frame )
+ {
+ int reg = alloc_volatile_reg ();
+ if ( reg != -1 )
+ {
+ /* Run through the insns, changing references to the original
+ PIC_OFFSET_TABLE_REGNUM to our new one. */
+ rtx insn;
+ const int nregs = PIC_OFFSET_TABLE_REGNUM + 1;
+ rtx *reg_map = (rtx *) xmalloc (nregs * sizeof (rtx));
+ memset (reg_map, 0, nregs * sizeof (rtx));
+ reg_map[PIC_OFFSET_TABLE_REGNUM] = gen_rtx_REG (SImode, reg);
+
+ push_topmost_sequence ();
+ for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
+ {
+ replace_regs (PATTERN (insn), reg_map, nregs, 1);
+ replace_regs (REG_NOTES (insn), reg_map, nregs, 1);
+ }
+ else if (GET_CODE (insn) == CALL_INSN)
+ {
+ if ( !SIBLING_CALL_P (insn))
+ abort ();
+ }
+ }
+ pop_topmost_sequence ();
+ free (reg_map);
+
+ regs_ever_live[reg] = 1;
+ regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 0;
+ cfun->machine->substitute_pic_base_reg = reg;
+ }
+ }
+ return 0;
+}
+/* APPLE LOCAL end volatile pic base reg in leaves */
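
try_leaf_pic_optimization only fires for leaf functions that made PIC references, rewriting the RTL so the pic base lives in a free volatile register instead of R31; with R31 unused, its save/restore (and often the whole frame setup) disappears. A hedged example of the kind of function it targets (names invented):

    /* A PIC leaf: it needs the pic base only to reach local static data
       and makes no calls, so any free volatile register can hold it.  */
    static const double scale[4] = { 1.0, 2.0, 4.0, 8.0 };

    double
    leaf_lookup (int i)
    {
      return scale[i & 3];   /* pic-relative load; no call, no saved R31 */
    }
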
+
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
@@ -11738,9 +12140,13 @@ gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
void
rs6000_emit_prologue (void)
{
+ /* APPLE LOCAL recompute PIC register use */
+ int dummy ATTRIBUTE_UNUSED = recompute_PIC_register_use ();
+ /* APPLE LOCAL volatile pic base reg in leaves */
+ int ignored ATTRIBUTE_UNUSED = try_leaf_pic_optimization ();
rs6000_stack_t *info = rs6000_stack_info ();
enum machine_mode reg_mode = Pmode;
- int reg_size = UNITS_PER_WORD;
+ int reg_size = TARGET_32BIT ? 4 : 8;
rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
rtx frame_ptr_rtx = gen_rtx_REG (Pmode, 12);
rtx frame_reg_rtx = sp_reg_rtx;
@@ -11749,7 +12155,24 @@ rs6000_emit_prologue (void)
int saving_FPRs_inline;
int using_store_multiple;
HOST_WIDE_INT sp_offset = 0;
+ /* APPLE LOCAL: callers_lr_already_saved */
+ int callers_lr_already_saved = 0;
+#if TARGET_MACHO
+ int lr_already_set_up_for_pic = 0;
+#endif
+ /* APPLE LOCAL special ObjC method use of R12 */
+ objc_method_using_pic = 0;
+ /* APPLE LOCAL BEGIN fix-and-continue mrs */
+ if (TARGET_FIX_AND_CONTINUE)
+ {
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ }
+ /* APPLE LOCAL END fix-and-continue mrs */
+
if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
{
reg_mode = V2SImode;
@@ -11785,6 +12208,31 @@ rs6000_emit_prologue (void)
rs6000_emit_stack_tie ();
}
+ /* APPLE LOCAL begin special ObjC method use of R12 */
+#if TARGET_MACHO
+ if (DEFAULT_ABI == ABI_DARWIN
+ && current_function_uses_pic_offset_table && flag_pic)
+ {
+ const char *piclabel_name = machopic_function_base_name ();
+
+ if (name_encodes_objc_method_p (piclabel_name)
+ /* If we're saving vector or FP regs via a function call,
+ then don't bother with this ObjC R12 optimization.
+ This test also eliminates world_save. */
+ && (info->first_altivec_reg_save > LAST_ALTIVEC_REGNO
+ || VECTOR_SAVE_INLINE (info->first_altivec_reg_save))
+ && (info->first_fp_reg_save == 64
+ || FP_SAVE_INLINE (info->first_fp_reg_save)))
+ {
+ /* We cannot output the label now; there seems to be no
+ way to prevent cfgcleanup from deleting it. It is done
+ in rs6000_output_function_prologue with fprintf! */
+ objc_method_using_pic = 1;
+ }
+ }
+#endif /* TARGET_MACHO */
+ /* APPLE LOCAL end special ObjC method use of R12 */
+
/* Save AltiVec registers if needed. */
if (TARGET_ALTIVEC_ABI && info->altivec_size != 0)
{
@@ -11862,7 +12310,12 @@ rs6000_emit_prologue (void)
/* If we need to save CR, put it into r12. */
if (info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
{
- cr_save_rtx = gen_rtx_REG (SImode, 12);
+ /* APPLE LOCAL begin special ObjC method use of R12 */
+ /* For Darwin, use R2, so we don't clobber the special ObjC
+ method use of R12. R11 has a special meaning for Ada, so we
+ can't use that. */
+ cr_save_rtx = gen_rtx_REG (SImode, DEFAULT_ABI == ABI_DARWIN ? 2 : 12);
+ /* APPLE LOCAL end special ObjC method use of R12 */
emit_insn (gen_movesi_from_cr (cr_save_rtx));
}
@@ -11885,17 +12338,57 @@ rs6000_emit_prologue (void)
char rname[30];
const char *alloc_rname;
rtvec p;
- p = rtvec_alloc (2 + 64 - info->first_fp_reg_save);
+
+ /* APPLE LOCAL begin Reduce code size / improve performance */
+ int gen_following_label = 0;
+ int count = 0;
+
+ if (current_function_uses_pic_offset_table && flag_pic
+#ifdef INSN_SCHEDULING
+ /* Prevent the compiler from crashing
+ while scheduling insns after global_alloc! */
+ && (optimize == 0 || !flag_schedule_insns_after_reload)
+#endif
+ /* If this is the last CALL in the prolog, then we've got our PC.
+ If we're saving AltiVec regs via a function, we're not last. */
+ && (info->first_altivec_reg_save > LAST_ALTIVEC_REGNO
+ || VECTOR_SAVE_INLINE (info->first_altivec_reg_save)))
+ gen_following_label = lr_already_set_up_for_pic = 1;
+
+ /* APPLE LOCAL: +2 (could be conditionalized) */
+ p = rtvec_alloc (2 + 64 - info->first_fp_reg_save + 2
+ + gen_following_label);
- RTVEC_ELT (p, 0) = gen_rtx_CLOBBER (VOIDmode,
+ RTVEC_ELT (p, count++) = gen_rtx_CLOBBER (VOIDmode,
gen_rtx_REG (Pmode,
LINK_REGISTER_REGNUM));
+ /* APPLE LOCAL begin reduce code size */
+#if TARGET_MACHO
+ /* We have to calculate the offset into saveFP to where we must
+     call (!!).  SAVEFP also saves the caller's LR -- placed into
+ R0 above -- into 8(R1). SAVEFP/RESTOREFP should never be
+ called to save or restore only F31. */
+
+ if (info->lr_save_offset != 8 || info->first_fp_reg_save == 63)
+ abort ();
+
+ sprintf (rname, "*saveFP%s%.0d ; save f%d-f31",
+ (info->first_fp_reg_save - 32 == 14 ? "" : "+"),
+ (info->first_fp_reg_save - 46) * 4,
+ info->first_fp_reg_save - 32);
+#else
+ /* APPLE LOCAL end reduce code size */
sprintf (rname, "%s%d%s", SAVE_FP_PREFIX,
info->first_fp_reg_save - 32, SAVE_FP_SUFFIX);
+ /* APPLE LOCAL reduce code size */
+#endif /* TARGET_MACHO */
alloc_rname = ggc_strdup (rname);
- RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode,
+ RTVEC_ELT (p, count++) = gen_rtx_USE (VOIDmode,
gen_rtx_SYMBOL_REF (Pmode,
alloc_rname));
+ /* APPLE LOCAL reduce code size */
+ if ( gen_following_label )
+ RTVEC_ELT (p, count++) = gen_rtx_USE (VOIDmode, const0_rtx);
for (i = 0; i < 64 - info->first_fp_reg_save; i++)
{
rtx addr, reg, mem;
@@ -11906,11 +12399,31 @@ rs6000_emit_prologue (void)
mem = gen_rtx_MEM (DFmode, addr);
set_mem_alias_set (mem, rs6000_sr_alias_set);
- RTVEC_ELT (p, i + 2) = gen_rtx_SET (VOIDmode, mem, reg);
+ RTVEC_ELT (p, count++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ /* APPLE LOCAL begin fix 2866661 */
+#if TARGET_MACHO
+ /* Darwin version of these functions stores R0. */
+ RTVEC_ELT (p, count++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
+
+ /* If we saved LR, *tell* people about it! */
+ if (info->lr_save_p)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->lr_save_offset + sp_offset));
+ rtx mem = gen_rtx_MEM (Pmode, addr);
+ /* This should not be of rs6000_sr_alias_set, because of
+ __builtin_return_address. */
+ RTVEC_ELT (p, count++) = gen_rtx_SET (Pmode, mem,
+ gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
}
+#endif
+ /* APPLE LOCAL end fix 2866661 */
insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
NULL_RTX, NULL_RTX);
+ /* APPLE LOCAL: callers_lr_already_saved */
+ callers_lr_already_saved = 1;
}
/* Save GPRs. This is done as a PARALLEL if we are using
@@ -11945,7 +12458,11 @@ rs6000_emit_prologue (void)
&& ! call_used_regs[info->first_gp_reg_save+i])
|| (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
- || (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
+ /* APPLE LOCAL begin volatile pic base reg in leaves */
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic
+ && current_function_uses_pic_offset_table
+ && cfun->machine->substitute_pic_base_reg == -1))))
+ /* APPLE LOCAL end volatile pic base reg in leaves */
{
rtx addr, reg, mem;
reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
@@ -12027,8 +12544,18 @@ rs6000_emit_prologue (void)
}
}
+ /* APPLE LOCAL special ObjC method use of R12 */
+ if (objc_method_using_pic)
+ rs6000_maybe_dead (
+ emit_move_insn (gen_rtx_REG (Pmode,
+ cfun->machine->substitute_pic_base_reg == -1
+ ? PIC_OFFSET_TABLE_REGNUM
+ : cfun->machine->substitute_pic_base_reg),
+ gen_rtx_REG (Pmode, 12)));
+
/* Save lr if we used it. */
- if (info->lr_save_p)
+ /* APPLE LOCAL: callers_lr_already_saved */
+ if (info->lr_save_p && !callers_lr_already_saved)
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->lr_save_offset + sp_offset));
@@ -12111,17 +12638,38 @@ rs6000_emit_prologue (void)
#if TARGET_MACHO
if (DEFAULT_ABI == ABI_DARWIN
+ /* APPLE LOCAL special ObjC method use of R12 */
+ && !objc_method_using_pic
&& flag_pic && current_function_uses_pic_offset_table)
{
rtx dest = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM);
const char *picbase = machopic_function_base_name ();
rtx src = gen_rtx_SYMBOL_REF (Pmode, picbase);
- rs6000_maybe_dead (emit_insn (gen_load_macho_picbase (dest, src)));
+ /* APPLE LOCAL begin save and restore LR */
+ /* Save and restore LR locally around this call (in R0). */
+ if (!info->lr_save_p)
+ rs6000_maybe_dead (emit_move_insn (gen_rtx_REG (Pmode, 0), dest));
+ /* APPLE LOCAL end save and restore LR */
+
+ /* APPLE LOCAL begin performance enhancement */
+#if TARGET_MACHO
+ if (!lr_already_set_up_for_pic)
+ rs6000_maybe_dead (emit_insn (gen_load_macho_picbase (dest, src)));
+#endif
+ /* APPLE LOCAL end performance enhancement */
+ /* APPLE LOCAL begin volatile pic base reg in leaves */
rs6000_maybe_dead (
- emit_move_insn (gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM),
- gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)));
+ emit_move_insn (
+ gen_rtx_REG (Pmode,
+ cfun->machine->substitute_pic_base_reg == -1
+ ? RS6000_PIC_OFFSET_TABLE_REGNUM
+ : cfun->machine->substitute_pic_base_reg),
+ dest));
+ if (!info->lr_save_p)
+ rs6000_maybe_dead (emit_move_insn (dest, gen_rtx_REG (Pmode, 0)));
+ /* APPLE LOCAL end */
}
#endif
}
@@ -12137,6 +12685,8 @@ rs6000_output_function_prologue (FILE *file,
if (TARGET_DEBUG_STACK)
debug_stack_info (info);
+ /* APPLE LOCAL do not extern fp save/restore */
+#if !TARGET_MACHO
/* Write .extern for any function we will call to save and restore
fp values. */
if (info->first_fp_reg_save < 64
@@ -12145,6 +12695,8 @@ rs6000_output_function_prologue (FILE *file,
SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX,
RESTORE_FP_PREFIX, info->first_fp_reg_save - 32,
RESTORE_FP_SUFFIX);
+ /* APPLE LOCAL do not extern fp save/restore */
+#endif /* !TARGET_MACHO */
/* Write .extern for AIX common mode routines, if needed. */
if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
@@ -12158,6 +12710,16 @@ rs6000_output_function_prologue (FILE *file,
common_mode_defined = 1;
}
+ /* APPLE LOCAL special ObjC method use of R12 */
+#if TARGET_MACHO
+ if ( HAVE_prologue && DEFAULT_ABI == ABI_DARWIN && objc_method_using_pic )
+ {
+ /* APPLE FIXME isn't there an asm macro to do all this? */
+ const char* piclabel = machopic_function_base_name ();
+ fprintf(file, "%s:\n", (*piclabel == '*') ? piclabel + 1 : piclabel);
+ }
+#endif
+
if (! HAVE_prologue)
{
start_sequence ();
@@ -12207,7 +12769,7 @@ rs6000_emit_epilogue (int sibcall)
rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
rtx frame_reg_rtx = sp_reg_rtx;
enum machine_mode reg_mode = Pmode;
- int reg_size = UNITS_PER_WORD;
+ int reg_size = TARGET_32BIT ? 4 : 8;
int i;
info = rs6000_stack_info ();
@@ -12232,6 +12794,8 @@ rs6000_emit_epilogue (int sibcall)
using_mfcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
|| rs6000_cpu == PROCESSOR_PPC603
|| rs6000_cpu == PROCESSOR_PPC750
+ /* APPLE LOCAL ? */
+ || rs6000_cpu == PROCESSOR_PPC7400
|| optimize_size);
/* If we have a frame pointer, a call to alloca, or a large stack
@@ -12324,7 +12888,9 @@ rs6000_emit_epilogue (int sibcall)
set_mem_alias_set (mem, rs6000_sr_alias_set);
- emit_move_insn (gen_rtx_REG (SImode, 12), mem);
+ /* APPLE LOCAL use R11 because of ObjC use of R12 in sibcall to CTR */
+ emit_move_insn (gen_rtx_REG (SImode,
+ DEFAULT_ABI == ABI_DARWIN ? 11 : 12), mem);
}
/* Set LR here to try to overlap restores below. */
@@ -12394,7 +12960,11 @@ rs6000_emit_epilogue (int sibcall)
&& ! call_used_regs[info->first_gp_reg_save+i])
|| (i+info->first_gp_reg_save == RS6000_PIC_OFFSET_TABLE_REGNUM
&& ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
- || (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
+ /* APPLE LOCAL begin darwin native */
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic
+ && current_function_uses_pic_offset_table
+ && cfun->machine->substitute_pic_base_reg == -1))))
+ /* APPLE LOCAL end darwin native */
{
rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
GEN_INT (info->gp_save_offset
@@ -12448,7 +13018,9 @@ rs6000_emit_epilogue (int sibcall)
/* If we saved cr, restore it here. Just those that were used. */
if (info->cr_save_p)
{
- rtx r12_rtx = gen_rtx_REG (SImode, 12);
+ /* APPLE LOCAL use R11 because of ObjC use of R12 in sibcall to CTR */
+ /* APPLE LOCAL silly name retained to minimize deviation from FSF */
+ rtx r12_rtx = gen_rtx_REG (SImode, DEFAULT_ABI == ABI_DARWIN ? 11 : 12);
int count = 0;
if (using_mfcr_multiple)
@@ -12548,8 +13120,25 @@ rs6000_emit_epilogue (int sibcall)
char rname[30];
const char *alloc_rname;
+ /* APPLE LOCAL begin code size reduction / performance enhancement */
+#if TARGET_MACHO
+ /* We have to calculate the offset into RESTFP to where we must
+     call (!!).  RESTFP also restores the caller's LR from 8(R1).
+ RESTFP should *never* be called to restore only F31. */
+
+ if (info->lr_save_offset != 8 || info->first_fp_reg_save == 63)
+ abort ();
+
+ sprintf (rname, "*restFP%s%.0d ; restore f%d-f31",
+ (info->first_fp_reg_save - 32 == 14 ? "" : "+"),
+ (info->first_fp_reg_save - 46) * 4,
+ info->first_fp_reg_save - 32);
+#else
+ /* APPLE LOCAL end code size reduction / performance enhancement */
sprintf (rname, "%s%d%s", RESTORE_FP_PREFIX,
info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);
+ /* APPLE LOCAL code size reduction / performance enhancement */
+#endif /* TARGET_MACHO */
alloc_rname = ggc_strdup (rname);
RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode,
gen_rtx_SYMBOL_REF (Pmode,
@@ -14663,6 +15252,11 @@ rs6000_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
const struct attribute_spec rs6000_attribute_table[] =
{
/* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ /* APPLE LOCAL begin double destructor */
+#ifdef SUBTARGET_ATTRIBUTE_TABLE
+ SUBTARGET_ATTRIBUTE_TABLE
+#endif
+ /* APPLE LOCAL end double destructor */
{ "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute },
{ "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
{ "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
@@ -14756,6 +15350,21 @@ rs6000_handle_altivec_attribute (tree *node, tree name, tree args,
return NULL_TREE;
}
+/* AltiVec defines four built-in scalar types that serve as vector
+ elements; we must teach the compiler how to mangle them. */
+
+static const char *
+rs6000_mangle_fundamental_type (tree type)
+{
+ if (type == bool_char_type_node) return "U6__boolc";
+ if (type == bool_short_type_node) return "U6__bools";
+ if (type == pixel_type_node) return "u7__pixel";
+ if (type == bool_int_type_node) return "U6__booli";
+
+ /* For all other types, use normal C++ mangling. */
+ return NULL;
+}
+
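
With the hook in place, the AltiVec bool and pixel element types get the vendor-extended codes above when they appear in C++ mangled names. A hedged illustration, assuming the usual Itanium-style _Z<len><name><params> scheme (the exact strings depend on the rest of the mangler):

    /* AltiVec-extended declarations; the comments note the parameter code
       the hook contributes.  Compile as C++ with AltiVec enabled.  */
    void take_bool_vec (__vector __bool int v);   /* parameter: U6__booli */
    void take_pixel_vec (__vector __pixel p);     /* parameter: u7__pixel */
    /* take_pixel_vec would come out roughly as _Z14take_pixel_vecu7__pixel.  */
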
/* Handle a "longcall" or "shortcall" attribute; arguments as in
struct attribute_spec.handler. */
@@ -14998,6 +15607,7 @@ symbolic_operand (rtx op)
#if TARGET_MACHO
static tree branch_island_list = 0;
+static int local_label_unique_number = 0;
/* Remember to generate a branch island for far calls to the given
function. */
@@ -15027,17 +15637,20 @@ macho_branch_islands (void)
{
char tmp_buf[512];
tree branch_island;
+ const char *name;
+ const char *label;
+ char name_buf[512];
+ char *local_label_0;
+ const char *non_lazy_pointer_name, *unencoded_non_lazy_pointer_name;
+ int length;
for (branch_island = branch_island_list;
branch_island;
branch_island = TREE_CHAIN (branch_island))
{
- const char *label =
- IDENTIFIER_POINTER (BRANCH_ISLAND_LABEL_NAME (branch_island));
- const char *name =
- darwin_strip_name_encoding (
- IDENTIFIER_POINTER (BRANCH_ISLAND_FUNCTION_NAME (branch_island)));
- char name_buf[512];
+ label = IDENTIFIER_POINTER (BRANCH_ISLAND_LABEL_NAME (branch_island));
+ name = darwin_strip_name_encoding (
+ IDENTIFIER_POINTER (BRANCH_ISLAND_FUNCTION_NAME (branch_island)));
/* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
if (name[0] == '*' || name[0] == '&')
strcpy (name_buf, name+1);
@@ -15053,15 +15666,66 @@ macho_branch_islands (void)
fprintf (asm_out_file, "\t.stabd 68,0," HOST_WIDE_INT_PRINT_UNSIGNED "\n",
BRANCH_ISLAND_LINE_NUMBER(branch_island));
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
- if (flag_pic)
+ /* If PIC and the callee has no stub, do an indirect call through a
+ non-lazy-pointer. 'save_world' expects a parameter in R11;
+ theh dyld_stub_binding_helper (part of the Mach-O stub
+ interface) expects a different parameter in R11. This is
+	 the dyld_stub_binding_helper (part of the Mach-O stub
+	 interface) expects a different parameter in R11.  This is
+	 effectively a "non-lazy stub."  By the way, a
+ application. */
+ if (flag_pic && (machopic_classify_ident (get_identifier (name)) == MACHOPIC_UNDEFINED))
+ {
+ /* This is the address of the non-lazy pointer; load from it
+ to get the address we want. */
+ non_lazy_pointer_name = machopic_non_lazy_ptr_name (name);
+ machopic_validate_stub_or_non_lazy_ptr (non_lazy_pointer_name,
+ /* non-lazy-pointer */0);
+ unencoded_non_lazy_pointer_name =
+ (*targetm.strip_name_encoding) (non_lazy_pointer_name);
+ length = strlen (name);
+ local_label_0 = alloca (length + 32);
+ /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
+ if (name[0] == '*' || name[0] == '&')
+ strcpy (name_buf, name+1);
+ else
+ {
+ name_buf[0] = '_';
+ strcpy (name_buf+1, name);
+ }
+
+      sprintf (local_label_0, "%s_%d_pic", name_buf, local_label_unique_number);
+ local_label_unique_number++;
+ strcpy (tmp_buf, "\n");
+ strcat (tmp_buf, label);
+      strcat (tmp_buf, ":\n\tmflr r0\n");
+      strcat (tmp_buf, "\tbcl 20,31,");
+      strcat (tmp_buf, local_label_0);
+      strcat (tmp_buf, "\n");
+      strcat (tmp_buf, local_label_0);
+      strcat (tmp_buf, ":\n");
+ strcat (tmp_buf, "\tmflr r12\n");
+ strcat (tmp_buf, "\taddis r12,r12,ha16(");
+ strcat (tmp_buf, non_lazy_pointer_name);
+ strcat (tmp_buf, "-");
+ strcat (tmp_buf, local_label_0);
+ strcat (tmp_buf, ")\n\tlwz r12,lo16(");
+ strcat (tmp_buf, non_lazy_pointer_name);
+ strcat (tmp_buf, "-");
+ strcat (tmp_buf, local_label_0);
+ strcat (tmp_buf, ")(r12)\n");
+ strcat (tmp_buf, "\tmtlr r0\n");
+ strcat (tmp_buf, "\tmtctr r12\n");
+ strcat (tmp_buf, "\tbctr\n");
+ }
+ else if (flag_pic)
{
strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
strcat (tmp_buf, label);
strcat (tmp_buf, "_pic\n");
strcat (tmp_buf, label);
- strcat (tmp_buf, "_pic:\n\tmflr r11\n");
+ strcat (tmp_buf, "_pic:\n\tmflr r12\n");
- strcat (tmp_buf, "\taddis r11,r11,ha16(");
+ strcat (tmp_buf, "\taddis r12,r12,ha16(");
strcat (tmp_buf, name_buf);
strcat (tmp_buf, " - ");
strcat (tmp_buf, label);
@@ -15069,7 +15733,7 @@ macho_branch_islands (void)
strcat (tmp_buf, "\tmtlr r0\n");
- strcat (tmp_buf, "\taddi r12,r11,lo16(");
+ strcat (tmp_buf, "\taddi r12,r12,lo16(");
strcat (tmp_buf, name_buf);
strcat (tmp_buf, " - ");
strcat (tmp_buf, label);
@@ -15135,12 +15799,55 @@ char *
output_call (rtx insn, rtx *operands, int dest_operand_number, int cookie_operand_number)
{
static char buf[256];
+ const char *far_call_instr_str=NULL, *near_call_instr_str=NULL;
+ rtx pattern;
+
+ switch (GET_CODE (insn))
+ {
+ case CALL_INSN:
+ far_call_instr_str = "jbsr";
+ near_call_instr_str = "bl";
+ pattern = NULL_RTX;
+ break;
+ case JUMP_INSN:
+ far_call_instr_str = "jmp";
+ near_call_instr_str = "b";
+ pattern = NULL_RTX;
+ break;
+ case INSN:
+ pattern = PATTERN (insn);
+ break;
+ default:
+ abort();
+ break;
+ }
+
if (GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
&& (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
{
tree labelname;
tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
+ /* This insn represents a prologue or epilogue. */
+ if ((pattern != NULL_RTX) && GET_CODE (pattern) == PARALLEL)
+ {
+ rtx parallel_first_op = XVECEXP (pattern, 0, 0);
+ switch (GET_CODE (parallel_first_op))
+ {
+ case CLOBBER: /* Prologue: a call to save_world. */
+ far_call_instr_str = "jbsr";
+ near_call_instr_str = "bl";
+ break;
+ case RETURN: /* Epilogue: a call to rest_world. */
+ far_call_instr_str = "jmp";
+ near_call_instr_str = "b";
+ break;
+ default:
+ abort();
+ break;
+ }
+ }
+
if (no_previous_def (funname))
{
int line_number = 0;
@@ -15303,6 +16010,129 @@ toc_section (void)
#endif /* TARGET_MACHO */
+/* APPLE LOCAL begin Macintosh alignment 2002-1-22 ff */
+/* Return the alignment of a struct based on the Macintosh PowerPC
+ alignment rules. In general the alignment of a struct is
+ determined by the greatest alignment of its elements. However, the
+ PowerPC rules cause the alignment of a struct to peg at word
+ alignment except when the first field has greater than word
+ (32-bit) alignment, in which case the alignment is determined by
+ the alignment of the first field. */
+
+unsigned
+round_type_align (tree the_struct, unsigned computed, unsigned specified)
+{
+ if (TARGET_ALTIVEC && TREE_CODE (the_struct) == VECTOR_TYPE)
+ {
+ /* All vectors are (at least) 16-byte aligned. A struct or
+ union with a vector element is also 16-byte aligned. */
+ return MAX (RS6000_VECTOR_ALIGNMENT, MAX (computed, specified));
+ }
+
+ if (TREE_CODE (the_struct) == RECORD_TYPE
+ || TREE_CODE (the_struct) == UNION_TYPE
+ || TREE_CODE (the_struct) == QUAL_UNION_TYPE)
+ {
+ tree first_field = TYPE_FIELDS (the_struct);
+
+ /* Skip past static fields, enums, and constant fields that are
+ not really a part of the record layout. */
+ while ((first_field != 0)
+ && (TREE_CODE (first_field) != FIELD_DECL))
+ first_field = TREE_CHAIN (first_field);
+
+ if (first_field != 0)
+ {
+ /* If other-than-default alignment (which includes mac68k
+ mode) is in effect, then no adjustments to the alignment
+ should be necessary. Ditto if the struct has the
+ __packed__ attribute. */
+ if (TYPE_PACKED (the_struct) || TARGET_ALIGN_MAC68K
+ || TARGET_ALIGN_NATURAL || maximum_field_alignment != 0)
+ /* Do nothing */ ;
+ else
+ {
+ /* The following code handles Macintosh PowerPC
+ alignment. The implementation is complicated by the
+ fact that BIGGEST_ALIGNMENT is 128 when AltiVec is
+ enabled and 32 when it is not. So when AltiVec is
+ not enabled, alignment is generally limited to word
+ alignment. Consequently, the alignment of unions has
+ to be recalculated if AltiVec is not enabled.
+
+ Below we explicitly test for fields with greater than
+ word alignment: doubles, long longs, and structs and
+ arrays with greater than word alignment. */
+ unsigned val;
+ tree field_type;
+
+ val = MAX (computed, specified);
+
+ if (TREE_CODE (the_struct) == UNION_TYPE && !TARGET_ALTIVEC)
+ {
+ tree field = first_field;
+
+ while (field != 0)
+ {
+ /* Don't consider statics, enums and constant fields
+ which are not really a part of the record. */
+ if (TREE_CODE (field) != FIELD_DECL)
+ {
+ field = TREE_CHAIN (field);
+ continue;
+ }
+ if (TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE)
+ field_type = get_inner_array_type (field);
+ else
+ field_type = TREE_TYPE (field);
+ val = MAX (TYPE_ALIGN (field_type), val);
+ if (FLOAT_TYPE_P (field_type)
+ && TYPE_MODE (field_type) == DFmode)
+ val = MAX (RS6000_DOUBLE_ALIGNMENT, val);
+ else if (INTEGRAL_TYPE_P (field_type)
+ && TYPE_MODE (field_type) == DImode)
+ val = MAX (RS6000_LONGLONG_ALIGNMENT, val);
+ field = TREE_CHAIN (field);
+ }
+ }
+ else
+ {
+ if (TREE_CODE (TREE_TYPE (first_field)) == ARRAY_TYPE)
+ field_type = get_inner_array_type (first_field);
+ else
+ field_type = TREE_TYPE (first_field);
+
+ if (field_type == error_mark_node)
+ return val;
+ val = MAX (TYPE_ALIGN (field_type), val);
+
+ if (FLOAT_TYPE_P (field_type)
+ && TYPE_MODE (field_type) == DFmode)
+ val = MAX (RS6000_DOUBLE_ALIGNMENT, val);
+ else if (INTEGRAL_TYPE_P (field_type)
+ && TYPE_MODE (field_type) == DImode)
+ val = MAX (RS6000_LONGLONG_ALIGNMENT, val);
+ }
+
+ return val;
+ }
+ } /* first_field != 0 */
+
+ /* Ensure all MAC68K structs are at least 16-bit aligned.
+ Unless the struct has __attribute__ ((packed)). */
+
+ if (TARGET_ALIGN_MAC68K && ! TYPE_PACKED (the_struct))
+ {
+ if (computed < 16)
+ computed = 16;
+ }
+ } /* RECORD_TYPE, etc */
+
+ return (MAX (computed, specified));
+}
+/* APPLE LOCAL end Macintosh alignment 2002-1-22 ff */
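
The power rule is easiest to see on two structs that differ only in field order: only a wide first field lifts the struct's alignment above a word. A hedged sketch (the alignments shown are what the rule implies, not measured output):

    /* Under the Macintosh "power" alignment implemented above:  */
    struct lead_double  { double d; char c; };  /* first field is 8-byte
                                                   aligned -> struct align 8 */
    struct trail_double { char c; double d; };  /* first field is a char ->
                                                   struct pegged at word (4)
                                                   alignment; d lands at 4   */
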
+
#if TARGET_ELF
static unsigned int
rs6000_elf_section_type_flags (tree decl, const char *name, int reloc)
diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h
index 641e4bbb8af..d36046211ad 100644
--- a/gcc/config/rs6000/rs6000.h
+++ b/gcc/config/rs6000/rs6000.h
@@ -23,6 +23,9 @@
/* Note that some other tm.h files include this one and then override
many of the definitions. */
+/* APPLE LOCAL fat builds */
+#define DEFAULT_TARGET_ARCH "ppc"
+
/* Definitions for the object file format. These are set at
compile-time. */
@@ -109,8 +112,10 @@
program.
Do not define this macro if it does not need to do anything. */
-
+
+#ifndef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS
+#endif
#define EXTRA_SPECS \
{ "cpp_default", CPP_DEFAULT_SPEC }, \
@@ -197,6 +202,15 @@ extern int target_flags;
0x00100000, and sysv4.h uses 0x00800000 -> 0x40000000.
0x80000000 is not available because target_flags is signed. */
+/* APPLE LOCAL long-branch */
+/* gen call addr in register for >64M range */
+#define MASK_LONG_BRANCH 0x02000000
+
+/* APPLE LOCAL BEGIN fix-and-continue mrs */
+#define MASK_FIX_AND_CONTINUE 0x04000000
+#define MASK_INDIRECT_ALL_DATA 0x08000000
+/* APPLE LOCAL END fix-and-continue mrs */
+
#define TARGET_POWER (target_flags & MASK_POWER)
#define TARGET_POWER2 (target_flags & MASK_POWER2)
#define TARGET_POWERPC (target_flags & MASK_POWERPC)
@@ -215,6 +229,8 @@ extern int target_flags;
#define TARGET_SCHED_PROLOG (target_flags & MASK_SCHED_PROLOG)
#define TARGET_ALTIVEC (target_flags & MASK_ALTIVEC)
#define TARGET_AIX_STRUCT_RET (target_flags & MASK_AIX_STRUCT_RET)
+/* APPLE LOCAL long-branch */
+#define TARGET_LONG_BRANCH (target_flags & MASK_LONG_BRANCH)
/* Define TARGET_MFCRF if the target assembler supports the optional
field operand for mfcr and the target processor supports the
@@ -226,7 +242,6 @@ extern int target_flags;
#define TARGET_MFCRF 0
#endif
-
#define TARGET_32BIT (! TARGET_64BIT)
#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
#define TARGET_UPDATE (! TARGET_NO_UPDATE)
@@ -248,6 +263,10 @@ extern int target_flags;
#endif
#define TARGET_XL_CALL 0
+/* APPLE LOCAL BEGIN fix-and-continue mrs */
+#define TARGET_FIX_AND_CONTINUE (target_flags & MASK_FIX_AND_CONTINUE)
+#define TARGET_INDIRECT_ALL_DATA (target_flags & MASK_INDIRECT_ALL_DATA)
+/* APPLE LOCAL END fix-and-continue mrs */
/* Run-time compilation parameters selecting different hardware subsets.
@@ -346,6 +365,23 @@ extern int target_flags;
""}, \
{"no-svr4-struct-return", MASK_AIX_STRUCT_RET, \
""}, \
+ /* APPLE LOCAL long-branch */ \
+ {"long-branch", MASK_LONG_BRANCH, \
+ N_("Generate 32-bit call addresses (range > 64M)")}, \
+ {"no-long-branch", -MASK_LONG_BRANCH, ""}, \
+ {"longcall", MASK_LONG_BRANCH, \
+ N_("Generate 32-bit call addresses (range > 64M)")}, \
+ {"no-longcall", -MASK_LONG_BRANCH, ""}, \
+ /* APPLE LOCAL BEGIN fix-and-continue mrs */ \
+ {"fix-and-continue", MASK_FIX_AND_CONTINUE, \
+ N_("Generate code suitable for fast turn around debugging")}, \
+ {"no-fix-and-continue", -MASK_FIX_AND_CONTINUE, \
+ N_("Don't generate code suitable for fast turn around debugging")},\
+ {"indirect-data", MASK_INDIRECT_ALL_DATA, \
+ N_("Generate code suitable for fast turn around debugging")}, \
+ {"no-indirect-data", -MASK_INDIRECT_ALL_DATA, \
+ N_("Don't generate code suitable for fast turn around debugging")},\
+ /* APPLE LOCAL END fix-and-continue mrs */ \
{"mfcrf", MASK_MFCRF, \
N_("Generate single field mfcr instruction")}, \
{"no-mfcrf", - MASK_MFCRF, \
@@ -539,6 +575,10 @@ extern const char *rs6000_warn_altivec_long_switch;
#define MASK_ALIGN_POWER 0x00000000
#define MASK_ALIGN_NATURAL 0x00000001
#define TARGET_ALIGN_NATURAL (rs6000_alignment_flags & MASK_ALIGN_NATURAL)
+/* APPLE LOCAL begin Macintosh alignment 2002-2-26 ff */
+#define MASK_ALIGN_MAC68K 0x00000002
+#define TARGET_ALIGN_MAC68K (rs6000_alignment_flags & MASK_ALIGN_MAC68K)
+/* APPLE LOCAL end Macintosh alignment 2002-2-26 ff */
#else
#define TARGET_ALIGN_NATURAL 0
#endif
@@ -728,6 +768,13 @@ extern const char *rs6000_warn_altivec_long_switch;
/* Allocation boundary (in *bits*) for the code of a function. */
#define FUNCTION_BOUNDARY 32
+/* Constants for alignment macros below. */
+/* APPLE LOCAL begin Macintosh alignment */
+#define RS6000_DOUBLE_ALIGNMENT 64
+#define RS6000_LONGLONG_ALIGNMENT 64
+#define RS6000_VECTOR_ALIGNMENT 128
+/* APPLE LOCAL end Macintosh alignment */
+
/* No data type wants to be aligned rounder than this. */
#define BIGGEST_ALIGNMENT 128
@@ -1162,8 +1209,7 @@ extern const char *rs6000_warn_altivec_long_switch;
= call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1; \
if (DEFAULT_ABI == ABI_DARWIN \
&& PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) \
- global_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
- = fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
= call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] \
= call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1; \
if (TARGET_ALTIVEC) \
@@ -1528,6 +1574,9 @@ extern enum rs6000_abi rs6000_current_abi; /* available for use by subtarget */
makes the stack pointer a smaller address. */
#define STACK_GROWS_DOWNWARD
+/* Offsets recorded in opcodes are a multiple of this alignment factor. */
+#define DWARF_CIE_DATA_ALIGNMENT (-((int) (TARGET_32BIT ? 4 : 8)))
+
/* Define this if the nominal address of the stack frame
is at the high-address end of the local variables;
that is, each additional local variable allocated
@@ -1717,6 +1766,9 @@ typedef struct machine_function GTY(())
int sysv_varargs_p;
/* Flags if __builtin_return_address (n) with n >= 1 was used. */
int ra_needs_full_frame;
+ /* APPLE LOCAL volatile pic base reg in leaves */
+ /* Substitute PIC register in leaf functions */
+ int substitute_pic_base_reg;
/* Some local-dynamic symbol. */
const char *some_ld_name;
/* Whether the instruction chain has been scanned already. */
@@ -2015,7 +2067,6 @@ typedef struct rs6000_args
On the RS/6000, all integer constants are acceptable, most won't be valid
for particular insns, though. Only easy FP constants are
acceptable. */
-
#define LEGITIMATE_CONSTANT_P(X) \
(((GET_CODE (X) != CONST_DOUBLE \
&& GET_CODE (X) != CONST_VECTOR) \
@@ -2126,7 +2177,8 @@ typedef struct rs6000_args
#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
do { \
int win; \
- (X) = rs6000_legitimize_reload_address ((X), (MODE), (OPNUM), \
+ /* APPLE LOCAL pass reload addr by address */ \
+ (X) = rs6000_legitimize_reload_address (&(X), (MODE), (OPNUM), \
(int)(TYPE), (IND_LEVELS), &win); \
if ( win ) \
goto WIN; \
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index 29b36d6e4e5..d5f4f878b68 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -67,7 +67,7 @@
(const_string "integer"))
;; Length (in bytes).
-; '(pc)' in the following doesn't include the instruction itself; it is
+; '(pc)' in the following doesn't include the instruction itself; it is
; calculated as if the instruction had zero size.
(define_attr "length" ""
(if_then_else (eq_attr "type" "branch")
@@ -1632,7 +1632,7 @@
operands[3] = gen_reg_rtx (SImode);
operands[4] = gen_reg_rtx (SImode);
})
-
+
(define_expand "ffssi2"
[(set (match_dup 2)
(neg:SI (match_operand:SI 1 "gpc_reg_operand" "r")))
@@ -1648,7 +1648,7 @@
operands[3] = gen_reg_rtx (SImode);
operands[4] = gen_reg_rtx (SImode);
})
-
+
(define_expand "mulsi3"
[(use (match_operand:SI 0 "gpc_reg_operand" ""))
(use (match_operand:SI 1 "gpc_reg_operand" ""))
@@ -1672,10 +1672,10 @@
"@
{muls|mullw} %0,%1,%2
{muli|mulli} %0,%1,%2"
- [(set (attr "type")
+ [(set (attr "type")
(cond [(match_operand:SI 2 "s8bit_cint_operand" "")
(const_string "imul3")
- (match_operand:SI 2 "short_cint_operand" "")
+ (match_operand:SI 2 "short_cint_operand" "")
(const_string "imul2")]
(const_string "imul")))])
@@ -1687,10 +1687,10 @@
"@
{muls|mullw} %0,%1,%2
{muli|mulli} %0,%1,%2"
- [(set (attr "type")
+ [(set (attr "type")
(cond [(match_operand:SI 2 "s8bit_cint_operand" "")
(const_string "imul3")
- (match_operand:SI 2 "short_cint_operand" "")
+ (match_operand:SI 2 "short_cint_operand" "")
(const_string "imul2")]
(const_string "imul")))])
@@ -2568,7 +2568,7 @@
(const_int 0)))]
"")
-;; Split a logical operation that we can't do in one insn into two insns,
+;; Split a logical operation that we can't do in one insn into two insns,
;; each of which does one 16-bit part. This is used by combine.
(define_split
@@ -4686,7 +4686,7 @@
"TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
[(const_int 0)]
"
-{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
+{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
operands[1], operands[2]);
DONE;
}")
@@ -4879,7 +4879,7 @@
(minus:DF (match_operand:DF 3 "gpc_reg_operand" "f")
(mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
(match_operand:DF 2 "gpc_reg_operand" "f"))))]
- "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
&& ! HONOR_SIGNED_ZEROS (DFmode)"
"{fnms|fnmsub} %0,%1,%2,%3"
[(set_attr "type" "dmul")])
@@ -4892,7 +4892,7 @@
[(set_attr "type" "dsqrt")])
;; The conditional move instructions allow us to perform max and min
-;; operations even when
+;; operations even when
(define_expand "maxdf3"
[(set (match_operand:DF 0 "gpc_reg_operand" "")
@@ -4920,7 +4920,7 @@
"TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS"
[(const_int 0)]
"
-{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
+{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
operands[1], operands[2]);
DONE;
}")
@@ -4991,7 +4991,11 @@
{
if (TARGET_POWERPC64)
{
- rtx mem = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+ /* APPLE LOCAL assign_stack_local_with_alias is used instead of
+ assign_stack_temp to get better scheduling, at the cost of some
+ stack space. */
+ rtx mem = assign_stack_local_with_alias (DImode, GET_MODE_SIZE (DImode),
+ GET_MODE_ALIGNMENT (DImode));
rtx t1 = gen_reg_rtx (DImode);
rtx t2 = gen_reg_rtx (DImode);
emit_insn (gen_floatsidf_ppc64 (operands[0], operands[1], mem, t1, t2));
@@ -5000,7 +5004,11 @@
operands[2] = force_reg (SImode, GEN_INT (0x43300000));
operands[3] = force_reg (DFmode, CONST_DOUBLE_ATOF (\"4503601774854144\", DFmode));
- operands[4] = assign_stack_temp (DFmode, GET_MODE_SIZE (DFmode), 0);
+ /* APPLE LOCAL assign_stack_local_with_alias is used instead of
+ assign_stack_temp to get better scheduling, at the cost of some
+ stack space. */
+ operands[4] = assign_stack_local_with_alias (DFmode, GET_MODE_SIZE (DFmode),
+ GET_MODE_ALIGNMENT (DFmode));
operands[5] = gen_reg_rtx (DFmode);
operands[6] = gen_reg_rtx (SImode);
}")
@@ -5046,7 +5054,7 @@
tmp = highword; highword = lowword; lowword = tmp;
}
- emit_insn (gen_xorsi3 (operands[6], operands[1],
+ emit_insn (gen_xorsi3 (operands[6], operands[1],
GEN_INT (~ (HOST_WIDE_INT) 0x7fffffff)));
emit_move_insn (gen_rtx_MEM (SImode, lowword), operands[6]);
emit_move_insn (gen_rtx_MEM (SImode, highword), operands[2]);
@@ -5073,7 +5081,11 @@
{
if (TARGET_POWERPC64)
{
- rtx mem = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+ /* APPLE LOCAL assign_stack_local_with_alias is used instead of
+ assign_stack_temp to get better scheduling, at the cost of some
+ stack space. */
+ rtx mem = assign_stack_local_with_alias (DImode, GET_MODE_SIZE (DImode),
+ GET_MODE_ALIGNMENT (DImode));
rtx t1 = gen_reg_rtx (DImode);
rtx t2 = gen_reg_rtx (DImode);
emit_insn (gen_floatunssidf_ppc64 (operands[0], operands[1], mem,
@@ -5083,7 +5095,11 @@
operands[2] = force_reg (SImode, GEN_INT (0x43300000));
operands[3] = force_reg (DFmode, CONST_DOUBLE_ATOF (\"4503599627370496\", DFmode));
- operands[4] = assign_stack_temp (DFmode, GET_MODE_SIZE (DFmode), 0);
+ /* APPLE LOCAL assign_stack_local_with_alias is used instead of
+ assign_stack_temp to get better scheduling, at the cost of some
+ stack space. */
+ operands[4] = assign_stack_local_with_alias (DFmode, GET_MODE_SIZE (DFmode),
+ GET_MODE_ALIGNMENT (DFmode));
operands[5] = gen_reg_rtx (DFmode);
}")
@@ -5141,7 +5157,11 @@
"
{
operands[2] = gen_reg_rtx (DImode);
- operands[3] = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+ /* APPLE LOCAL assign_stack_local_with_alias is used instead of
+ assign_stack_temp to get better scheduling, at the cost of some
+ stack space. */
+ operands[3] = assign_stack_local_with_alias (DImode, GET_MODE_SIZE (DImode),
+ GET_MODE_ALIGNMENT (DImode));
}")
(define_insn "*fix_truncdfsi2_internal"
@@ -5642,7 +5662,7 @@
(define_insn "*ashrdisi3_noppc64"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (subreg:SI (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (subreg:SI (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
(const_int 32)) 4))]
"TARGET_32BIT && !TARGET_POWERPC64"
"*
@@ -5652,7 +5672,7 @@
else
return \"mr %0,%1\";
}"
- [(set_attr "length" "4")])
+ [(set_attr "length" "4")])
;; PowerPC64 DImode operations.
@@ -6032,15 +6052,15 @@
(define_expand "ctzdi2"
[(set (match_dup 2)
(neg:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
- (parallel [(set (match_dup 3) (and:DI (match_dup 1)
- (match_dup 2)))
+ (parallel [(set (match_dup 3) (and:DI (match_dup 1)
+ (match_dup 2)))
(clobber (scratch:CC))])
(set (match_dup 4) (clz:DI (match_dup 3)))
(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(minus:DI (const_int 63) (match_dup 4)))]
"TARGET_POWERPC64"
{
- operands[2] = gen_reg_rtx (DImode);
+ operands[2] = gen_reg_rtx (DImode);
operands[3] = gen_reg_rtx (DImode);
operands[4] = gen_reg_rtx (DImode);
})
@@ -6048,15 +6068,15 @@
(define_expand "ffsdi2"
[(set (match_dup 2)
(neg:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
- (parallel [(set (match_dup 3) (and:DI (match_dup 1)
- (match_dup 2)))
+ (parallel [(set (match_dup 3) (and:DI (match_dup 1)
+ (match_dup 2)))
(clobber (scratch:CC))])
(set (match_dup 4) (clz:DI (match_dup 3)))
(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(minus:DI (const_int 64) (match_dup 4)))]
"TARGET_POWERPC64"
{
- operands[2] = gen_reg_rtx (DImode);
+ operands[2] = gen_reg_rtx (DImode);
operands[3] = gen_reg_rtx (DImode);
operands[4] = gen_reg_rtx (DImode);
})
@@ -6656,7 +6676,7 @@
"TARGET_POWERPC64"
"sld%I2 %0,%1,%H2"
[(set_attr "length" "8")])
-
+
(define_insn "*ashldi3_internal2"
[(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
(compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
@@ -6669,7 +6689,7 @@
#"
[(set_attr "type" "delayed_compare")
(set_attr "length" "4,8")])
-
+
(define_split
[(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
(compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
@@ -7335,7 +7355,7 @@
(const_int 0)))]
"")
-;; Split a logical operation that we can't do in one insn into two insns,
+;; Split a logical operation that we can't do in one insn into two insns,
;; each of which does one 16-bit part. This is used by combine.
(define_split
@@ -7349,7 +7369,7 @@
"
{
rtx i3,i4;
-
+
if (GET_CODE (operands[2]) == CONST_DOUBLE)
{
HOST_WIDE_INT value = CONST_DOUBLE_LOW (operands[2]);
@@ -7577,7 +7597,7 @@
;; Used by sched, shorten_branches and final when the GOT pseudo reg
;; didn't get allocated to a hard register.
-(define_split
+(define_split
[(set (match_operand:SI 0 "gpc_reg_operand" "")
(unspec:SI [(match_operand:SI 1 "got_no_const_operand" "")
(match_operand:SI 2 "memory_operand" "")]
@@ -7640,12 +7660,16 @@
return \"ld %0,lo16(%2)(%1)\";
else
{
- operands2[3] = gen_rtx_REG (SImode, RS6000_PIC_OFFSET_TABLE_REGNUM);
+ /* APPLE LOCAL volatile pic base reg in leaves */
+ operands2[3] = gen_rtx_REG (SImode,
+ (cfun->machine->substitute_pic_base_reg == -1
+ ? RS6000_PIC_OFFSET_TABLE_REGNUM
+ : cfun->machine->substitute_pic_base_reg));
output_asm_insn (\"{l|lwz} %0,lo16(%2)(%1)\", operands);
#if TARGET_MACHO
if (MACHO_DYNAMIC_NO_PIC_P)
output_asm_insn (\"{liu|lis} %L0,ha16(%2+4)\", operands);
- else
+ else
/* We cannot rely on ha16(low half)==ha16(high half), alas,
although in practice it almost always is. */
output_asm_insn (\"{cau|addis} %L0,%3,ha16(%2+4)\", operands2);
@@ -8254,7 +8278,7 @@
emit_move_insn (simplify_gen_subreg (DFmode, operands[0], TFmode, lo_word),
operands[2]);
DONE;
-})
+})
(define_expand "extendsftf2"
[(set (match_operand:TF 0 "nonimmediate_operand" "")
@@ -8474,7 +8498,7 @@
(define_split
[(set (match_operand:DI 0 "nonimmediate_operand" "")
(match_operand:DI 1 "input_operand" ""))]
- "reload_completed && !TARGET_POWERPC64
+ "reload_completed && !TARGET_POWERPC64
&& gpr_or_gpr_p (operands[0], operands[1])"
[(pc)]
{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
@@ -8634,7 +8658,7 @@
[(set (match_operand:TI 0 "reg_or_mem_operand" "=Q,m,????r,????r,????r")
(match_operand:TI 1 "reg_or_mem_operand" "r,r,r,Q,m"))
(clobber (match_scratch:SI 2 "=q,q#X,X,X,X"))]
- "TARGET_POWER && ! TARGET_POWERPC64
+ "TARGET_POWER && ! TARGET_POWERPC64
&& (gpc_reg_operand (operands[0], TImode) || gpc_reg_operand (operands[1], TImode))"
"*
{
@@ -8682,7 +8706,7 @@
case 3:
/* If the address is not used in the output, we can use lsi. Otherwise,
fall through to generating four loads. */
- if (TARGET_STRING
+ if (TARGET_STRING
&& ! reg_overlap_mentioned_p (operands[0], operands[1]))
return \"{lsi|lswi} %0,%P1,16\";
/* ... fall through ... */
@@ -8698,8 +8722,8 @@
"TARGET_POWERPC64 && (gpc_reg_operand (operands[0], TImode)
|| gpc_reg_operand (operands[1], TImode))"
"@
- #
- #
+ #
+ #
#"
[(set_attr "type" "*,load,store")])
@@ -9831,7 +9855,7 @@
if (current_function_limit_stack)
{
rtx available;
- available = expand_binop (Pmode, sub_optab,
+ available = expand_binop (Pmode, sub_optab,
stack_pointer_rtx, stack_limit_rtx,
NULL_RTX, 1, OPTAB_WIDEN);
emit_insn (gen_cond_trap (LTU, available, operands[1], const0_rtx));
@@ -10277,6 +10301,12 @@
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn (\"creqv 6,6,6\", operands);
+/* APPLE LOCAL -mlongcall */
+#ifdef RS6000_LONG_BRANCH
+ if (!flag_pic)
+    return output_call (insn, operands, 0, 2);
+ else
+#endif
return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z0@local\" : \"bl %z0\";
}"
[(set_attr "type" "branch")
@@ -10392,7 +10422,7 @@
(match_operand 1 "" "g"))
(use (match_operand:SI 2 "immediate_operand" "O"))
(clobber (match_scratch:SI 3 "=l"))]
- "TARGET_64BIT
+ "TARGET_64BIT
&& DEFAULT_ABI == ABI_AIX
&& (INTVAL (operands[2]) & CALL_LONG) == 0"
"bl %z0\;%."
@@ -10446,7 +10476,7 @@
(match_operand 2 "" "g")))
(use (match_operand:SI 3 "immediate_operand" "O"))
(clobber (match_scratch:SI 4 "=l"))]
- "TARGET_64BIT
+ "TARGET_64BIT
&& DEFAULT_ABI == ABI_AIX
&& (INTVAL (operands[3]) & CALL_LONG) == 0"
"bl %z1\;%."
@@ -10459,11 +10489,12 @@
;; operands[2] is the value FUNCTION_ARG returns for the VOID argument
;; which indicates how to set cr1
+;; APPLE LOCAL separate cl into c,*l; switch and attr's expanded to match
(define_insn "*call_indirect_nonlocal_sysv"
- [(call (mem:SI (match_operand:SI 0 "register_operand" "cl,cl"))
- (match_operand 1 "" "g,g"))
- (use (match_operand:SI 2 "immediate_operand" "O,n"))
- (clobber (match_scratch:SI 3 "=l,l"))]
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "c,*l,c,*l"))
+ (match_operand 1 "" "g,g,g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,O,n,n"))
+ (clobber (match_scratch:SI 3 "=l,l,l,l"))]
"DEFAULT_ABI == ABI_V4
|| DEFAULT_ABI == ABI_DARWIN"
{
@@ -10475,8 +10506,8 @@
return "b%T0l";
}
- [(set_attr "type" "jmpreg,jmpreg")
- (set_attr "length" "4,8")])
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
(define_insn "*call_nonlocal_sysv"
[(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "s,s"))
@@ -10497,17 +10528,18 @@
return output_call(insn, operands, 0, 2);
#else
return (DEFAULT_ABI == ABI_V4 && flag_pic) ? "bl %z0@plt" : "bl %z0";
-#endif
+#endif
}
[(set_attr "type" "branch,branch")
(set_attr "length" "4,8")])
+;; APPLE LOCAL separate cl into c,*l; switch and attr's expanded to match
(define_insn "*call_value_indirect_nonlocal_sysv"
[(set (match_operand 0 "" "")
- (call (mem:SI (match_operand:SI 1 "register_operand" "cl,cl"))
- (match_operand 2 "" "g,g")))
- (use (match_operand:SI 3 "immediate_operand" "O,n"))
- (clobber (match_scratch:SI 4 "=l,l"))]
+ (call (mem:SI (match_operand:SI 1 "register_operand" "c,*l,c,*l"))
+ (match_operand 2 "" "g,g,g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,O,n,n"))
+ (clobber (match_scratch:SI 4 "=l,l,l,l"))]
"DEFAULT_ABI == ABI_V4
|| DEFAULT_ABI == ABI_DARWIN"
{
@@ -10519,8 +10551,8 @@
return "b%T1l";
}
- [(set_attr "type" "jmpreg,jmpreg")
- (set_attr "length" "4,8")])
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
(define_insn "*call_value_nonlocal_sysv"
[(set (match_operand 0 "" "")
@@ -10542,7 +10574,7 @@
return output_call(insn, operands, 1, 3);
#else
return (DEFAULT_ABI == ABI_V4 && flag_pic) ? "bl %z1@plt" : "bl %z1";
-#endif
+#endif
}
[(set_attr "type" "branch,branch")
(set_attr "length" "4,8")])
@@ -10575,6 +10607,52 @@
DONE;
}")
+;; APPLE LOCAL sibcall patterns
+;; APPLE MERGE modify FSF patterns below instead?
+;; this and similar patterns must be marked as using LR, otherwise
+;; dataflow will try to delete the store into it. This is true
+;; even when the actual reg to jump to is in CTR, when LR was
+;; saved and restored around the PIC-setting BCL.
+(define_insn "*sibcall_symbolic"
+ [(call (mem:SI (match_operand:SI 0 "call_operand" "s,c"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_scratch:SI 3 "=l,l"))
+ (return)]
+ "! TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"b %z0\";
+ case 1: return \"b%T0\";
+ default: abort();
+ }
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_value_symbolic"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "call_operand" "s,c"))
+ (match_operand 2 "" "")))
+ (use (match_operand:SI 3 "" ""))
+ (use (match_scratch:SI 4 "=l,l"))
+ (return)]
+ "! TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"b %z1\";
+ case 1: return \"b%T1\";
+ default: abort();
+ }
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+;; APPLE LOCAL end sibcall patterns
+
;; sibling call patterns
(define_expand "sibcall"
[(parallel [(call (mem:SI (match_operand 0 "address_operand" ""))
@@ -10704,7 +10782,7 @@
(use (match_operand:SI 2 "immediate_operand" "O"))
(use (match_operand:SI 3 "register_operand" "l"))
(return)]
- "TARGET_64BIT
+ "TARGET_64BIT
&& DEFAULT_ABI == ABI_AIX
&& (INTVAL (operands[2]) & CALL_LONG) == 0"
"b %z0"
@@ -10732,7 +10810,7 @@
(use (match_operand:SI 3 "immediate_operand" "O"))
(use (match_operand:SI 4 "register_operand" "l"))
(return)]
- "TARGET_64BIT
+ "TARGET_64BIT
&& DEFAULT_ABI == ABI_AIX
&& (INTVAL (operands[3]) & CALL_LONG) == 0"
"b %z1"
@@ -11009,11 +11087,11 @@
[(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
""
"
-{
+{
if (! rs6000_compare_fp_p)
FAIL;
- rs6000_emit_sCOND (NE, operands[0]);
+ rs6000_emit_sCOND (NE, operands[0]);
DONE;
}")
@@ -11041,7 +11119,7 @@
&& (! TARGET_POWER || rs6000_compare_op1 == const0_rtx))
FAIL;
- rs6000_emit_sCOND (GT, operands[0]);
+ rs6000_emit_sCOND (GT, operands[0]);
DONE;
}")
@@ -11055,7 +11133,7 @@
&& (! TARGET_POWER || rs6000_compare_op1 == const0_rtx))
FAIL;
- rs6000_emit_sCOND (LE, operands[0]);
+ rs6000_emit_sCOND (LE, operands[0]);
DONE;
}")
@@ -11065,11 +11143,11 @@
""
"
{
- if (! rs6000_compare_fp_p
+ if (! rs6000_compare_fp_p
&& (! TARGET_POWER || rs6000_compare_op1 == const0_rtx))
FAIL;
- rs6000_emit_sCOND (LT, operands[0]);
+ rs6000_emit_sCOND (LT, operands[0]);
DONE;
}")
@@ -13791,7 +13869,7 @@
}")
(define_expand "tablejumpdi"
- [(set (match_dup 4)
+ [(set (match_dup 4)
(sign_extend:DI (match_operand:SI 0 "lwa_operand" "rm")))
(set (match_dup 3)
(plus:DI (match_dup 4)
@@ -14363,7 +14441,7 @@
(define_insn "movesi_from_cr"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
- (unspec:SI [(reg:CC 68) (reg:CC 69) (reg:CC 70) (reg:CC 71)
+ (unspec:SI [(reg:CC 68) (reg:CC 69) (reg:CC 70) (reg:CC 71)
(reg:CC 72) (reg:CC 73) (reg:CC 74) (reg:CC 75)]
UNSPEC_MOVESI_FROM_CR))]
""
@@ -14376,7 +14454,7 @@
(match_operand:SI 2 "gpc_reg_operand" "r"))])]
"TARGET_MULTIPLE"
"{stm|stmw} %2,%1")
-
+
(define_insn "*save_fpregs_si"
[(match_parallel 0 "any_operand"
[(clobber (match_operand:SI 1 "register_operand" "=l"))
@@ -14388,6 +14466,35 @@
[(set_attr "type" "branch")
(set_attr "length" "4")])
+/* APPLE LOCAL begin unnamed */
+(define_insn "*save_fpregs_with_label_si"
+ [(match_parallel 0 "any_operand"
+ [(clobber (match_operand:SI 1 "register_operand" "=l"))
+ (use (match_operand:SI 2 "call_operand" "s"))
+ (use (match_operand:SI 3 "" ""))
+ (set (match_operand:DF 4 "memory_operand" "=m")
+ (match_operand:DF 5 "gpc_reg_operand" "f"))])]
+ "TARGET_32BIT"
+ "*
+#if TARGET_MACHO
+ char *picbase = machopic_function_base_name ();
+ char *tmp;
+ operands[3] = gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (picbase, -1));
+ if (TARGET_LONG_BRANCH)
+ {
+ tmp = ggc_alloc (strlen (XSTR (operands[2], 0)) + strlen (XSTR (operands[3], 0)) + 2);
+ strcpy (tmp, output_call(insn, operands[2], 2, \"\"));
+ strcat (tmp, \"\\n%3:\");
+ return tmp;
+ }
+ else
+#endif
+ return \"bl %z2\\n%3:\";
+"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+/* APPLE LOCAL end unnamed */
+
(define_insn "*save_fpregs_di"
[(match_parallel 0 "any_operand"
[(clobber (match_operand:DI 1 "register_operand" "=l"))
@@ -14452,7 +14559,7 @@
(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand 2 "immediate_operand" "n")]
UNSPEC_MOVESI_TO_CR))]
- "GET_CODE (operands[0]) == REG
+ "GET_CODE (operands[0]) == REG
&& CR_REGNO_P (REGNO (operands[0]))
&& GET_CODE (operands[2]) == CONST_INT
&& INTVAL (operands[2]) == 1 << (75 - REGNO (operands[0]))"
@@ -14469,7 +14576,7 @@
(match_operand:SI 2 "memory_operand" "m"))])]
"TARGET_MULTIPLE"
"{lm|lmw} %1,%2")
-
+
(define_insn "*return_internal_si"
[(return)
(use (match_operand:SI 0 "register_operand" "lc"))]
@@ -14495,7 +14602,14 @@
(set (match_operand:DF 3 "gpc_reg_operand" "=f")
(match_operand:DF 4 "memory_operand" "m"))])]
"TARGET_32BIT"
- "b %z2")
+ {
+#if TARGET_MACHO
+ if (TARGET_LONG_BRANCH)
+ return output_call(insn, operands[2], 2, "");
+ else
+#endif
+ return "b %z2";
+ })
(define_insn "*return_and_restore_fpregs_di"
[(match_parallel 0 "any_operand"
@@ -14505,7 +14619,14 @@
(set (match_operand:DF 3 "gpc_reg_operand" "=f")
(match_operand:DF 4 "memory_operand" "m"))])]
"TARGET_64BIT"
- "b %z2")
+ {
+#if TARGET_MACHO
+ if (TARGET_LONG_BRANCH)
+ return output_call(insn, operands[2], 2, "");
+ else
+#endif
+ return "b %z2";
+ })
; This is used in compiling the unwind routines.
(define_expand "eh_return"
diff --git a/gcc/config/rs6000/sysv4.h b/gcc/config/rs6000/sysv4.h
index 1e0ac3707f6..88bf8196b17 100644
--- a/gcc/config/rs6000/sysv4.h
+++ b/gcc/config/rs6000/sysv4.h
@@ -434,6 +434,13 @@ do { \
#define BSS_SECTION_ASM_OP "\t.section\t\".bss\""
+/* APPLE LOCAL begin hot/cold partitioning */
+#define HOT_TEXT_SECTION_NAME ".text"
+#define NORMAL_TEXT_SECTION_NAME ".text"
+#define UNLIKELY_EXECUTED_TEXT_SECTION_NAME ".text.unlikely"
+#define SECTION_FORMAT_STRING ".section\t\"%s\"\n\t.align 2\n"
+/* APPLE LOCAL end hot/cold partitioning */
+
/* Override elfos.h definition. */
#undef INIT_SECTION_ASM_OP
#define INIT_SECTION_ASM_OP "\t.section\t\".init\",\"ax\""
diff --git a/gcc/config/rs6000/t-darwin b/gcc/config/rs6000/t-darwin
index 185bb00eed2..af710ed7cef 100644
--- a/gcc/config/rs6000/t-darwin
+++ b/gcc/config/rs6000/t-darwin
@@ -1,7 +1,11 @@
+# APPLE LOCAL begin AltiVec
# Add trampoline and long double support to libgcc.
LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-tramp.asm \
- $(srcdir)/config/rs6000/darwin-ldouble.c
+ $(srcdir)/config/rs6000/darwin-fpsave.asm \
+ $(srcdir)/config/rs6000/darwin-ldouble.c
+# Enable AltiVec instructions when assembling the aforementioned .asm files.
# For libgcc, we always want 128-bit long double, since a libgcc built with
# that will work without it.
-TARGET_LIBGCC2_CFLAGS = -mlong-double-128
+TARGET_LIBGCC2_CFLAGS = -mlong-double-128 -Wa,-force_cpusubtype_ALL
+# APPLE LOCAL end AltiVec
diff --git a/gcc/config/rs6000/t-rs6000 b/gcc/config/rs6000/t-rs6000
index 9546461e57d..9cc60036c00 100644
--- a/gcc/config/rs6000/t-rs6000
+++ b/gcc/config/rs6000/t-rs6000
@@ -18,6 +18,7 @@ rs6000-c.o: $(srcdir)/config/rs6000/rs6000-c.c \
# The rs6000 backend doesn't cause warnings in these files.
insn-conditions.o-warn =
+
# The files below trigger warnings in tree-ssa because of the gimplifier
# emitting code that confuse the compiler into thinking that some variables
# are used uninitialized.
diff --git a/gcc/config/rs6000/t-rtems b/gcc/config/rs6000/t-rtems
deleted file mode 100644
index 364a22d2278..00000000000
--- a/gcc/config/rs6000/t-rtems
+++ /dev/null
@@ -1,86 +0,0 @@
-# Multilibs for powerpc RTEMS targets.
-
-MULTILIB_OPTIONS = \
-mcpu=403/mcpu=505/mcpu=601/mcpu=602/mcpu=603/mcpu=603e/mcpu=604/mcpu=750/mcpu=821/mcpu=860 \
-Dmpc509/Dmpc8260 \
-D_OLD_EXCEPTIONS \
-msoft-float
-
-MULTILIB_DIRNAMES = \
-m403 m505 m601 m602 m603 m603e m604 m750 m821 m860 \
-mpc509 \
-mpc8260 \
-roe \
-nof
-
-MULTILIB_EXTRA_OPTS = mrelocatable-lib mno-eabi mstrict-align
-
-# MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT}
-MULTILIB_MATCHES = ${MULTILIB_MATCHES_ENDIAN} \
- ${MULTILIB_MATCHES_SYSV} \
- mcpu?505/Dmpc505=mcpu?505/Dmpc509
-
-#
-# RTEMS old/new-exceptions handling
-#
-# old-exception processing is depredicated, therefore
-#
-# * Cpu-variants supporting new exception processing are build
-# with new exception processing only
-# * Cpu-variants not having been ported to new exception processing are
-# build with old and new exception processing
-#
-
-# Cpu-variants supporting new exception processing only
-MULTILIB_NEW_EXCEPTIONS_ONLY = \
-*mcpu=604*/*D_OLD_EXCEPTIONS* \
-*mcpu=750*/*D_OLD_EXCEPTIONS* \
-*mcpu=821*/*D_OLD_EXCEPTIONS* \
-*Dmpc8260*/*D_OLD_EXCEPTIONS* \
-*mcpu=860*/*D_OLD_EXCEPTIONS*
-
-# Soft-float only, default implies msoft-float
-# NOTE: Must match with MULTILIB_MATCHES_FLOAT and MULTILIB_MATCHES
-MULTILIB_SOFTFLOAT_ONLY = \
-mcpu=403/*msoft-float* \
-mcpu=821/*msoft-float* \
-mcpu=860/*msoft-float*
-
-# Hard-float only, take out msoft-float
-MULTILIB_HARDFLOAT_ONLY = \
-mcpu=505/*msoft-float*
-
-MULTILIB_EXCEPTIONS =
-
-# Disallow -D_OLD_EXCEPTIONS without other options
-MULTILIB_EXCEPTIONS += D_OLD_EXCEPTIONS*
-
-# Disallow -Dppc and -Dmpc without other options
-MULTILIB_EXCEPTIONS += Dppc* Dmpc*
-
-MULTILIB_EXCEPTIONS += \
-${MULTILIB_NEW_EXCEPTIONS_ONLY} \
-${MULTILIB_SOFTFLOAT_ONLY} \
-${MULTILIB_HARDFLOAT_ONLY}
-
-# Special rules
-# Take out all variants we don't want
-MULTILIB_EXCEPTIONS += mcpu=403/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=403/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=505/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=505/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=601/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=601/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=602/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=602/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=603/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=603/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=603e/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=604/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=604/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=750/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=750/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=821/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=821/Dmpc8260*
-MULTILIB_EXCEPTIONS += mcpu=860/Dmpc509*
-MULTILIB_EXCEPTIONS += mcpu=860/Dmpc8260*
diff --git a/gcc/config/rs6000/vec.h b/gcc/config/rs6000/vec.h
new file mode 100644
index 00000000000..56e8786f25b
--- /dev/null
+++ b/gcc/config/rs6000/vec.h
@@ -0,0 +1,4515 @@
+/* APPLE LOCAL file AltiVec */
+/* This file is generated by ops-to-gp. Do not edit. */
+
+/* To regenerate execute:
+ ops-to-gp -gcc vec.ops builtin.ops
+ with the current directory being gcc/config/rs6000. */
+
+static const struct builtin B1_vec_abs = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 11, "vec_abs:1", "4", CODE_FOR_xfx_perm, B_UID(0) };
+static const struct builtin B2_vec_abs = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 11, "vec_abs:2", "2", CODE_FOR_xfx_perm, B_UID(1) };
+static const struct builtin B3_vec_abs = { { &T_vec_s32, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 11, "vec_abs:3", "3", CODE_FOR_xfx_perm, B_UID(2) };
+static const struct builtin B4_vec_abs = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s8, 1, FALSE, FALSE, 11, "vec_abs:4", "1", CODE_FOR_xfx_perm, B_UID(3) };
+static const struct builtin B1_vec_abss = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 11, "vec_abss:1", "6", CODE_FOR_xfx_perm, B_UID(4) };
+static const struct builtin B2_vec_abss = { { &T_vec_s32, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 11, "vec_abss:2", "7", CODE_FOR_xfx_perm, B_UID(5) };
+static const struct builtin B3_vec_abss = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s8, 1, FALSE, FALSE, 11, "vec_abss:3", "5", CODE_FOR_xfx_perm, B_UID(6) };
+static const struct builtin B1_vec_vadduhm = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vadduhm:1", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(7) };
+static const struct builtin B2_vec_vadduhm = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhm:2", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(8) };
+static const struct builtin B1_vec_vadduwm = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vadduwm:1", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(9) };
+static const struct builtin B2_vec_vadduwm = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduwm:2", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(10) };
+static const struct builtin B1_vec_vaddubm = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddubm:1", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(11) };
+static const struct builtin B2_vec_vaddubm = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubm:2", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(12) };
+static const struct builtin B_vec_vaddfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vaddfp", "*vaddfp", CODE_FOR_xfxx_fp, B_UID(13) };
+static const struct builtin B3_vec_vadduhm = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vadduhm:3", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(14) };
+static const struct builtin B4_vec_vadduhm = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vadduhm:4", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(15) };
+static const struct builtin B3_vec_vadduwm = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vadduwm:3", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(16) };
+static const struct builtin B4_vec_vadduwm = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vadduwm:4", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(17) };
+static const struct builtin B3_vec_vaddubm = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddubm:3", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(18) };
+static const struct builtin B4_vec_vaddubm = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddubm:4", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(19) };
+static const struct builtin B5_vec_vadduhm = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhm:5", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(20) };
+static const struct builtin B6_vec_vadduhm = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhm:6", "*vadduhm", CODE_FOR_xfxx_simple, B_UID(21) };
+static const struct builtin B5_vec_vadduwm = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduwm:5", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(22) };
+static const struct builtin B6_vec_vadduwm = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduwm:6", "*vadduwm", CODE_FOR_xfxx_simple, B_UID(23) };
+static const struct builtin B5_vec_vaddubm = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubm:5", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(24) };
+static const struct builtin B6_vec_vaddubm = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubm:6", "*vaddubm", CODE_FOR_xfxx_simple, B_UID(25) };
+static const struct builtin B_vec_vaddcuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vaddcuw", "*vaddcuw", CODE_FOR_xfxx_simple, B_UID(26) };
+static const struct builtin B1_vec_vaddshs = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vaddshs:1", "*vaddshs", CODE_FOR_xfxx_simple, B_UID(27) };
+static const struct builtin B1_vec_vadduhs = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhs:1", "*vadduhs", CODE_FOR_xfxx_simple, B_UID(28) };
+static const struct builtin B1_vec_vaddsws = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vaddsws:1", "*vaddsws", CODE_FOR_xfxx_simple, B_UID(29) };
+static const struct builtin B1_vec_vadduws = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduws:1", "*vadduws", CODE_FOR_xfxx_simple, B_UID(30) };
+static const struct builtin B1_vec_vaddsbs = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddsbs:1", "*vaddsbs", CODE_FOR_xfxx_simple, B_UID(31) };
+static const struct builtin B1_vec_vaddubs = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubs:1", "*vaddubs", CODE_FOR_xfxx_simple, B_UID(32) };
+static const struct builtin B2_vec_vaddshs = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vaddshs:2", "*vaddshs", CODE_FOR_xfxx_simple, B_UID(33) };
+static const struct builtin B3_vec_vaddshs = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vaddshs:3", "*vaddshs", CODE_FOR_xfxx_simple, B_UID(34) };
+static const struct builtin B2_vec_vaddsws = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vaddsws:2", "*vaddsws", CODE_FOR_xfxx_simple, B_UID(35) };
+static const struct builtin B3_vec_vaddsws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vaddsws:3", "*vaddsws", CODE_FOR_xfxx_simple, B_UID(36) };
+static const struct builtin B2_vec_vaddsbs = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddsbs:2", "*vaddsbs", CODE_FOR_xfxx_simple, B_UID(37) };
+static const struct builtin B3_vec_vaddsbs = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vaddsbs:3", "*vaddsbs", CODE_FOR_xfxx_simple, B_UID(38) };
+static const struct builtin B2_vec_vadduhs = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhs:2", "*vadduhs", CODE_FOR_xfxx_simple, B_UID(39) };
+static const struct builtin B3_vec_vadduhs = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vadduhs:3", "*vadduhs", CODE_FOR_xfxx_simple, B_UID(40) };
+static const struct builtin B2_vec_vadduws = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduws:2", "*vadduws", CODE_FOR_xfxx_simple, B_UID(41) };
+static const struct builtin B3_vec_vadduws = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vadduws:3", "*vadduws", CODE_FOR_xfxx_simple, B_UID(42) };
+static const struct builtin B2_vec_vaddubs = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubs:2", "*vaddubs", CODE_FOR_xfxx_simple, B_UID(43) };
+static const struct builtin B3_vec_vaddubs = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vaddubs:3", "*vaddubs", CODE_FOR_xfxx_simple, B_UID(44) };
+static const struct builtin B1_vec_all_eq = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:1", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(45) };
+static const struct builtin B2_vec_all_eq = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:2", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(46) };
+static const struct builtin B3_vec_all_eq = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:3", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(47) };
+static const struct builtin B4_vec_all_eq = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:4", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(48) };
+static const struct builtin B5_vec_all_eq = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:5", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(49) };
+static const struct builtin B6_vec_all_eq = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:6", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(50) };
+static const struct builtin B7_vec_all_eq = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:7", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(51) };
+static const struct builtin B8_vec_all_eq = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:8", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(52) };
+static const struct builtin B9_vec_all_eq = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:9", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(53) };
+static const struct builtin B10_vec_all_eq = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:10", "*vcmpeqfp.", CODE_FOR_j_24_t_fxx_simple, B_UID(54) };
+static const struct builtin B11_vec_all_eq = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:11", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(55) };
+static const struct builtin B12_vec_all_eq = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:12", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(56) };
+static const struct builtin B13_vec_all_eq = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:13", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(57) };
+static const struct builtin B14_vec_all_eq = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:14", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(58) };
+static const struct builtin B15_vec_all_eq = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:15", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(59) };
+static const struct builtin B16_vec_all_eq = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:16", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(60) };
+static const struct builtin B17_vec_all_eq = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:17", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(61) };
+static const struct builtin B18_vec_all_eq = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:18", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(62) };
+static const struct builtin B19_vec_all_eq = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:19", "*vcmpequh.", CODE_FOR_j_24_t_fxx_simple, B_UID(63) };
+static const struct builtin B20_vec_all_eq = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:20", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(64) };
+static const struct builtin B21_vec_all_eq = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:21", "*vcmpequw.", CODE_FOR_j_24_t_fxx_simple, B_UID(65) };
+static const struct builtin B22_vec_all_eq = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:22", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(66) };
+static const struct builtin B23_vec_all_eq = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_eq:23", "*vcmpequb.", CODE_FOR_j_24_t_fxx_simple, B_UID(67) };
+static const struct builtin B1_vec_all_ge = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:1", "*vcmpgtsh.", CODE_FOR_j_26_t_frxx_simple, B_UID(68) };
+static const struct builtin B2_vec_all_ge = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:2", "*vcmpgtuh.", CODE_FOR_j_26_t_frxx_simple, B_UID(69) };
+static const struct builtin B3_vec_all_ge = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:3", "*vcmpgtsw.", CODE_FOR_j_26_t_frxx_simple, B_UID(70) };
+static const struct builtin B4_vec_all_ge = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:4", "*vcmpgtuw.", CODE_FOR_j_26_t_frxx_simple, B_UID(71) };
+static const struct builtin B5_vec_all_ge = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:5", "*vcmpgtsb.", CODE_FOR_j_26_t_frxx_simple, B_UID(72) };
+static const struct builtin B6_vec_all_ge = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:6", "*vcmpgtub.", CODE_FOR_j_26_t_frxx_simple, B_UID(73) };
+static const struct builtin B7_vec_all_ge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_ge:7", "*vcmpgefp.", CODE_FOR_j_24_t_fxx_simple, B_UID(74) };
+static const struct builtin B8_vec_all_ge = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:8", "*vcmpgtsh.", CODE_FOR_j_26_t_frxx_simple, B_UID(75) };
+static const struct builtin B9_vec_all_ge = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:9", "*vcmpgtsh.", CODE_FOR_j_26_t_frxx_simple, B_UID(76) };
+static const struct builtin B10_vec_all_ge = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:10", "*vcmpgtsw.", CODE_FOR_j_26_t_frxx_simple, B_UID(77) };
+static const struct builtin B11_vec_all_ge = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:11", "*vcmpgtsw.", CODE_FOR_j_26_t_frxx_simple, B_UID(78) };
+static const struct builtin B12_vec_all_ge = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:12", "*vcmpgtsb.", CODE_FOR_j_26_t_frxx_simple, B_UID(79) };
+static const struct builtin B13_vec_all_ge = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:13", "*vcmpgtsb.", CODE_FOR_j_26_t_frxx_simple, B_UID(80) };
+static const struct builtin B14_vec_all_ge = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:14", "*vcmpgtuh.", CODE_FOR_j_26_t_frxx_simple, B_UID(81) };
+static const struct builtin B15_vec_all_ge = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:15", "*vcmpgtuh.", CODE_FOR_j_26_t_frxx_simple, B_UID(82) };
+static const struct builtin B16_vec_all_ge = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:16", "*vcmpgtuw.", CODE_FOR_j_26_t_frxx_simple, B_UID(83) };
+static const struct builtin B17_vec_all_ge = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:17", "*vcmpgtuw.", CODE_FOR_j_26_t_frxx_simple, B_UID(84) };
+static const struct builtin B18_vec_all_ge = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:18", "*vcmpgtub.", CODE_FOR_j_26_t_frxx_simple, B_UID(85) };
+static const struct builtin B19_vec_all_ge = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_ge:19", "*vcmpgtub.", CODE_FOR_j_26_t_frxx_simple, B_UID(86) };
+static const struct builtin B1_vec_all_gt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:1", "*vcmpgtsh.", CODE_FOR_j_24_t_fxx_simple, B_UID(87) };
+static const struct builtin B2_vec_all_gt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:2", "*vcmpgtuh.", CODE_FOR_j_24_t_fxx_simple, B_UID(88) };
+static const struct builtin B3_vec_all_gt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:3", "*vcmpgtsw.", CODE_FOR_j_24_t_fxx_simple, B_UID(89) };
+static const struct builtin B4_vec_all_gt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:4", "*vcmpgtuw.", CODE_FOR_j_24_t_fxx_simple, B_UID(90) };
+static const struct builtin B5_vec_all_gt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:5", "*vcmpgtsb.", CODE_FOR_j_24_t_fxx_simple, B_UID(91) };
+static const struct builtin B6_vec_all_gt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:6", "*vcmpgtub.", CODE_FOR_j_24_t_fxx_simple, B_UID(92) };
+static const struct builtin B7_vec_all_gt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:7", "*vcmpgtfp.", CODE_FOR_j_24_t_fxx_simple, B_UID(93) };
+static const struct builtin B8_vec_all_gt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:8", "*vcmpgtsh.", CODE_FOR_j_24_t_fxx_simple, B_UID(94) };
+static const struct builtin B9_vec_all_gt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:9", "*vcmpgtsh.", CODE_FOR_j_24_t_fxx_simple, B_UID(95) };
+static const struct builtin B10_vec_all_gt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:10", "*vcmpgtsw.", CODE_FOR_j_24_t_fxx_simple, B_UID(96) };
+static const struct builtin B11_vec_all_gt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:11", "*vcmpgtsw.", CODE_FOR_j_24_t_fxx_simple, B_UID(97) };
+static const struct builtin B12_vec_all_gt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:12", "*vcmpgtsb.", CODE_FOR_j_24_t_fxx_simple, B_UID(98) };
+static const struct builtin B13_vec_all_gt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:13", "*vcmpgtsb.", CODE_FOR_j_24_t_fxx_simple, B_UID(99) };
+static const struct builtin B14_vec_all_gt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:14", "*vcmpgtuh.", CODE_FOR_j_24_t_fxx_simple, B_UID(100) };
+static const struct builtin B15_vec_all_gt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:15", "*vcmpgtuh.", CODE_FOR_j_24_t_fxx_simple, B_UID(101) };
+static const struct builtin B16_vec_all_gt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:16", "*vcmpgtuw.", CODE_FOR_j_24_t_fxx_simple, B_UID(102) };
+static const struct builtin B17_vec_all_gt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:17", "*vcmpgtuw.", CODE_FOR_j_24_t_fxx_simple, B_UID(103) };
+static const struct builtin B18_vec_all_gt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:18", "*vcmpgtub.", CODE_FOR_j_24_t_fxx_simple, B_UID(104) };
+static const struct builtin B19_vec_all_gt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24t, 2, FALSE, FALSE, 0, "vec_all_gt:19", "*vcmpgtub.", CODE_FOR_j_24_t_fxx_simple, B_UID(105) };
+static const struct builtin B_vec_all_in = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_in", "*vcmpbfp.", CODE_FOR_j_26_t_fxx_simple, B_UID(106) };
+static const struct builtin B1_vec_all_le = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:1", "*vcmpgtsh.", CODE_FOR_j_26_t_fxx_simple, B_UID(107) };
+static const struct builtin B2_vec_all_le = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:2", "*vcmpgtuh.", CODE_FOR_j_26_t_fxx_simple, B_UID(108) };
+static const struct builtin B3_vec_all_le = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:3", "*vcmpgtsw.", CODE_FOR_j_26_t_fxx_simple, B_UID(109) };
+static const struct builtin B4_vec_all_le = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:4", "*vcmpgtuw.", CODE_FOR_j_26_t_fxx_simple, B_UID(110) };
+static const struct builtin B5_vec_all_le = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:5", "*vcmpgtsb.", CODE_FOR_j_26_t_fxx_simple, B_UID(111) };
+static const struct builtin B6_vec_all_le = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:6", "*vcmpgtub.", CODE_FOR_j_26_t_fxx_simple, B_UID(112) };
+static const struct builtin B7_vec_all_le = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_le:7", "*vcmpgefp.", CODE_FOR_j_24_t_frxx_simple, B_UID(113) };
+static const struct builtin B8_vec_all_le = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:8", "*vcmpgtsh.", CODE_FOR_j_26_t_fxx_simple, B_UID(114) };
+static const struct builtin B9_vec_all_le = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:9", "*vcmpgtsh.", CODE_FOR_j_26_t_fxx_simple, B_UID(115) };
+static const struct builtin B10_vec_all_le = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:10", "*vcmpgtsw.", CODE_FOR_j_26_t_fxx_simple, B_UID(116) };
+static const struct builtin B11_vec_all_le = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:11", "*vcmpgtsw.", CODE_FOR_j_26_t_fxx_simple, B_UID(117) };
+static const struct builtin B12_vec_all_le = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:12", "*vcmpgtsb.", CODE_FOR_j_26_t_fxx_simple, B_UID(118) };
+static const struct builtin B13_vec_all_le = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:13", "*vcmpgtsb.", CODE_FOR_j_26_t_fxx_simple, B_UID(119) };
+static const struct builtin B14_vec_all_le = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:14", "*vcmpgtuh.", CODE_FOR_j_26_t_fxx_simple, B_UID(120) };
+static const struct builtin B15_vec_all_le = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:15", "*vcmpgtuh.", CODE_FOR_j_26_t_fxx_simple, B_UID(121) };
+static const struct builtin B16_vec_all_le = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:16", "*vcmpgtuw.", CODE_FOR_j_26_t_fxx_simple, B_UID(122) };
+static const struct builtin B17_vec_all_le = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:17", "*vcmpgtuw.", CODE_FOR_j_26_t_fxx_simple, B_UID(123) };
+static const struct builtin B18_vec_all_le = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:18", "*vcmpgtub.", CODE_FOR_j_26_t_fxx_simple, B_UID(124) };
+static const struct builtin B19_vec_all_le = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_le:19", "*vcmpgtub.", CODE_FOR_j_26_t_fxx_simple, B_UID(125) };
+static const struct builtin B1_vec_all_lt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:1", "*vcmpgtsh.", CODE_FOR_j_24_t_frxx_simple, B_UID(126) };
+static const struct builtin B2_vec_all_lt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:2", "*vcmpgtuh.", CODE_FOR_j_24_t_frxx_simple, B_UID(127) };
+static const struct builtin B3_vec_all_lt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:3", "*vcmpgtsw.", CODE_FOR_j_24_t_frxx_simple, B_UID(128) };
+static const struct builtin B4_vec_all_lt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:4", "*vcmpgtuw.", CODE_FOR_j_24_t_frxx_simple, B_UID(129) };
+static const struct builtin B5_vec_all_lt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:5", "*vcmpgtsb.", CODE_FOR_j_24_t_frxx_simple, B_UID(130) };
+static const struct builtin B6_vec_all_lt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:6", "*vcmpgtub.", CODE_FOR_j_24_t_frxx_simple, B_UID(131) };
+static const struct builtin B7_vec_all_lt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:7", "*vcmpgtfp.", CODE_FOR_j_24_t_frxx_simple, B_UID(132) };
+static const struct builtin B8_vec_all_lt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:8", "*vcmpgtsh.", CODE_FOR_j_24_t_frxx_simple, B_UID(133) };
+static const struct builtin B9_vec_all_lt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:9", "*vcmpgtsh.", CODE_FOR_j_24_t_frxx_simple, B_UID(134) };
+static const struct builtin B10_vec_all_lt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:10", "*vcmpgtsw.", CODE_FOR_j_24_t_frxx_simple, B_UID(135) };
+static const struct builtin B11_vec_all_lt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:11", "*vcmpgtsw.", CODE_FOR_j_24_t_frxx_simple, B_UID(136) };
+static const struct builtin B12_vec_all_lt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:12", "*vcmpgtsb.", CODE_FOR_j_24_t_frxx_simple, B_UID(137) };
+static const struct builtin B13_vec_all_lt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:13", "*vcmpgtsb.", CODE_FOR_j_24_t_frxx_simple, B_UID(138) };
+static const struct builtin B14_vec_all_lt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:14", "*vcmpgtuh.", CODE_FOR_j_24_t_frxx_simple, B_UID(139) };
+static const struct builtin B15_vec_all_lt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:15", "*vcmpgtuh.", CODE_FOR_j_24_t_frxx_simple, B_UID(140) };
+static const struct builtin B16_vec_all_lt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:16", "*vcmpgtuw.", CODE_FOR_j_24_t_frxx_simple, B_UID(141) };
+static const struct builtin B17_vec_all_lt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:17", "*vcmpgtuw.", CODE_FOR_j_24_t_frxx_simple, B_UID(142) };
+static const struct builtin B18_vec_all_lt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:18", "*vcmpgtub.", CODE_FOR_j_24_t_frxx_simple, B_UID(143) };
+static const struct builtin B19_vec_all_lt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24tr, 2, FALSE, FALSE, 0, "vec_all_lt:19", "*vcmpgtub.", CODE_FOR_j_24_t_frxx_simple, B_UID(144) };
+static const struct builtin B_vec_all_nan = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc26td, 1, FALSE, FALSE, 0, "vec_all_nan", "*vcmpeqfp.", CODE_FOR_j_26_t_fx_simple, B_UID(145) };
+static const struct builtin B1_vec_all_ne = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:1", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(146) };
+static const struct builtin B2_vec_all_ne = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:2", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(147) };
+static const struct builtin B3_vec_all_ne = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:3", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(148) };
+static const struct builtin B4_vec_all_ne = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:4", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(149) };
+static const struct builtin B5_vec_all_ne = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:5", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(150) };
+static const struct builtin B6_vec_all_ne = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:6", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(151) };
+static const struct builtin B7_vec_all_ne = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:7", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(152) };
+static const struct builtin B8_vec_all_ne = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:8", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(153) };
+static const struct builtin B9_vec_all_ne = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:9", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(154) };
+static const struct builtin B10_vec_all_ne = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:10", "*vcmpeqfp.", CODE_FOR_j_26_t_fxx_simple, B_UID(155) };
+static const struct builtin B11_vec_all_ne = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:11", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(156) };
+static const struct builtin B12_vec_all_ne = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:12", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(157) };
+static const struct builtin B13_vec_all_ne = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:13", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(158) };
+static const struct builtin B14_vec_all_ne = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:14", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(159) };
+static const struct builtin B15_vec_all_ne = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:15", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(160) };
+static const struct builtin B16_vec_all_ne = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:16", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(161) };
+static const struct builtin B17_vec_all_ne = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:17", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(162) };
+static const struct builtin B18_vec_all_ne = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:18", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(163) };
+static const struct builtin B19_vec_all_ne = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:19", "*vcmpequh.", CODE_FOR_j_26_t_fxx_simple, B_UID(164) };
+static const struct builtin B20_vec_all_ne = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:20", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(165) };
+static const struct builtin B21_vec_all_ne = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:21", "*vcmpequw.", CODE_FOR_j_26_t_fxx_simple, B_UID(166) };
+static const struct builtin B22_vec_all_ne = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:22", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(167) };
+static const struct builtin B23_vec_all_ne = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ne:23", "*vcmpequb.", CODE_FOR_j_26_t_fxx_simple, B_UID(168) };
+static const struct builtin B_vec_all_nge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_nge", "*vcmpgefp.", CODE_FOR_j_26_t_fxx_simple, B_UID(169) };
+static const struct builtin B_vec_all_ngt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26t, 2, FALSE, FALSE, 0, "vec_all_ngt", "*vcmpgtfp.", CODE_FOR_j_26_t_fxx_simple, B_UID(170) };
+static const struct builtin B_vec_all_nle = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_nle", "*vcmpgefp.", CODE_FOR_j_26_t_frxx_simple, B_UID(171) };
+static const struct builtin B_vec_all_nlt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26tr, 2, FALSE, FALSE, 0, "vec_all_nlt", "*vcmpgtfp.", CODE_FOR_j_26_t_frxx_simple, B_UID(172) };
+static const struct builtin B_vec_all_numeric = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc24td, 1, FALSE, FALSE, 0, "vec_all_numeric", "*vcmpeqfp.", CODE_FOR_j_24_t_fx_simple, B_UID(173) };
+static const struct builtin B1_vec_vand = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 2, "vec_vand:1", "*vand", CODE_FOR_xfxx_simple, B_UID(174) };
+static const struct builtin B2_vec_vand = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vand:2", "*vand", CODE_FOR_xfxx_simple, B_UID(175) };
+static const struct builtin B3_vec_vand = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vand:3", "*vand", CODE_FOR_xfxx_simple, B_UID(176) };
+static const struct builtin B4_vec_vand = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 2, "vec_vand:4", "*vand", CODE_FOR_xfxx_simple, B_UID(177) };
+static const struct builtin B5_vec_vand = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vand:5", "*vand", CODE_FOR_xfxx_simple, B_UID(178) };
+static const struct builtin B6_vec_vand = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vand:6", "*vand", CODE_FOR_xfxx_simple, B_UID(179) };
+static const struct builtin B7_vec_vand = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vand:7", "*vand", CODE_FOR_xfxx_simple, B_UID(180) };
+static const struct builtin B8_vec_vand = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 2, "vec_vand:8", "*vand", CODE_FOR_xfxx_simple, B_UID(181) };
+static const struct builtin B9_vec_vand = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vand:9", "*vand", CODE_FOR_xfxx_simple, B_UID(182) };
+static const struct builtin B10_vec_vand = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vand:10", "*vand", CODE_FOR_xfxx_simple, B_UID(183) };
+static const struct builtin B11_vec_vand = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vand:11", "*vand", CODE_FOR_xfxx_simple, B_UID(184) };
+static const struct builtin B12_vec_vand = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vand:12", "*vand", CODE_FOR_xfxx_simple, B_UID(185) };
+static const struct builtin B13_vec_vand = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vand:13", "*vand", CODE_FOR_xfxx_simple, B_UID(186) };
+static const struct builtin B14_vec_vand = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vand:14", "*vand", CODE_FOR_xfxx_simple, B_UID(187) };
+static const struct builtin B15_vec_vand = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vand:15", "*vand", CODE_FOR_xfxx_simple, B_UID(188) };
+static const struct builtin B16_vec_vand = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vand:16", "*vand", CODE_FOR_xfxx_simple, B_UID(189) };
+static const struct builtin B17_vec_vand = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vand:17", "*vand", CODE_FOR_xfxx_simple, B_UID(190) };
+static const struct builtin B18_vec_vand = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vand:18", "*vand", CODE_FOR_xfxx_simple, B_UID(191) };
+static const struct builtin B19_vec_vand = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vand:19", "*vand", CODE_FOR_xfxx_simple, B_UID(192) };
+static const struct builtin B20_vec_vand = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vand:20", "*vand", CODE_FOR_xfxx_simple, B_UID(193) };
+static const struct builtin B21_vec_vand = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vand:21", "*vand", CODE_FOR_xfxx_simple, B_UID(194) };
+static const struct builtin B22_vec_vand = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vand:22", "*vand", CODE_FOR_xfxx_simple, B_UID(195) };
+static const struct builtin B23_vec_vand = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vand:23", "*vand", CODE_FOR_xfxx_simple, B_UID(196) };
+static const struct builtin B24_vec_vand = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vand:24", "*vand", CODE_FOR_xfxx_simple, B_UID(197) };
+static const struct builtin B1_vec_vandc = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 1, "vec_vandc:1", "*vandc", CODE_FOR_xfxx_simple, B_UID(198) };
+static const struct builtin B2_vec_vandc = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vandc:2", "*vandc", CODE_FOR_xfxx_simple, B_UID(199) };
+static const struct builtin B3_vec_vandc = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vandc:3", "*vandc", CODE_FOR_xfxx_simple, B_UID(200) };
+static const struct builtin B4_vec_vandc = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 1, "vec_vandc:4", "*vandc", CODE_FOR_xfxx_simple, B_UID(201) };
+static const struct builtin B5_vec_vandc = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vandc:5", "*vandc", CODE_FOR_xfxx_simple, B_UID(202) };
+static const struct builtin B6_vec_vandc = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vandc:6", "*vandc", CODE_FOR_xfxx_simple, B_UID(203) };
+static const struct builtin B7_vec_vandc = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vandc:7", "*vandc", CODE_FOR_xfxx_simple, B_UID(204) };
+static const struct builtin B8_vec_vandc = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 1, "vec_vandc:8", "*vandc", CODE_FOR_xfxx_simple, B_UID(205) };
+static const struct builtin B9_vec_vandc = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vandc:9", "*vandc", CODE_FOR_xfxx_simple, B_UID(206) };
+static const struct builtin B10_vec_vandc = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vandc:10", "*vandc", CODE_FOR_xfxx_simple, B_UID(207) };
+static const struct builtin B11_vec_vandc = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vandc:11", "*vandc", CODE_FOR_xfxx_simple, B_UID(208) };
+static const struct builtin B12_vec_vandc = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vandc:12", "*vandc", CODE_FOR_xfxx_simple, B_UID(209) };
+static const struct builtin B13_vec_vandc = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vandc:13", "*vandc", CODE_FOR_xfxx_simple, B_UID(210) };
+static const struct builtin B14_vec_vandc = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vandc:14", "*vandc", CODE_FOR_xfxx_simple, B_UID(211) };
+static const struct builtin B15_vec_vandc = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vandc:15", "*vandc", CODE_FOR_xfxx_simple, B_UID(212) };
+static const struct builtin B16_vec_vandc = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vandc:16", "*vandc", CODE_FOR_xfxx_simple, B_UID(213) };
+static const struct builtin B17_vec_vandc = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vandc:17", "*vandc", CODE_FOR_xfxx_simple, B_UID(214) };
+static const struct builtin B18_vec_vandc = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vandc:18", "*vandc", CODE_FOR_xfxx_simple, B_UID(215) };
+static const struct builtin B19_vec_vandc = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vandc:19", "*vandc", CODE_FOR_xfxx_simple, B_UID(216) };
+static const struct builtin B20_vec_vandc = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vandc:20", "*vandc", CODE_FOR_xfxx_simple, B_UID(217) };
+static const struct builtin B21_vec_vandc = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vandc:21", "*vandc", CODE_FOR_xfxx_simple, B_UID(218) };
+static const struct builtin B22_vec_vandc = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vandc:22", "*vandc", CODE_FOR_xfxx_simple, B_UID(219) };
+static const struct builtin B23_vec_vandc = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vandc:23", "*vandc", CODE_FOR_xfxx_simple, B_UID(220) };
+static const struct builtin B24_vec_vandc = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vandc:24", "*vandc", CODE_FOR_xfxx_simple, B_UID(221) };
+static const struct builtin B1_vec_any_eq = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:1", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(222) };
+static const struct builtin B2_vec_any_eq = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:2", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(223) };
+static const struct builtin B3_vec_any_eq = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:3", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(224) };
+static const struct builtin B4_vec_any_eq = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:4", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(225) };
+static const struct builtin B5_vec_any_eq = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:5", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(226) };
+static const struct builtin B6_vec_any_eq = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:6", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(227) };
+static const struct builtin B7_vec_any_eq = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:7", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(228) };
+static const struct builtin B8_vec_any_eq = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:8", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(229) };
+static const struct builtin B9_vec_any_eq = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:9", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(230) };
+static const struct builtin B10_vec_any_eq = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:10", "*vcmpeqfp.", CODE_FOR_j_26_f_fxx_simple, B_UID(231) };
+static const struct builtin B11_vec_any_eq = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:11", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(232) };
+static const struct builtin B12_vec_any_eq = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:12", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(233) };
+static const struct builtin B13_vec_any_eq = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:13", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(234) };
+static const struct builtin B14_vec_any_eq = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:14", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(235) };
+static const struct builtin B15_vec_any_eq = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:15", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(236) };
+static const struct builtin B16_vec_any_eq = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:16", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(237) };
+static const struct builtin B17_vec_any_eq = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:17", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(238) };
+static const struct builtin B18_vec_any_eq = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:18", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(239) };
+static const struct builtin B19_vec_any_eq = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:19", "*vcmpequh.", CODE_FOR_j_26_f_fxx_simple, B_UID(240) };
+static const struct builtin B20_vec_any_eq = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:20", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(241) };
+static const struct builtin B21_vec_any_eq = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:21", "*vcmpequw.", CODE_FOR_j_26_f_fxx_simple, B_UID(242) };
+static const struct builtin B22_vec_any_eq = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:22", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(243) };
+static const struct builtin B23_vec_any_eq = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_eq:23", "*vcmpequb.", CODE_FOR_j_26_f_fxx_simple, B_UID(244) };
+static const struct builtin B1_vec_any_ge = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:1", "*vcmpgtsh.", CODE_FOR_j_24_f_frxx_simple, B_UID(245) };
+static const struct builtin B2_vec_any_ge = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:2", "*vcmpgtuh.", CODE_FOR_j_24_f_frxx_simple, B_UID(246) };
+static const struct builtin B3_vec_any_ge = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:3", "*vcmpgtsw.", CODE_FOR_j_24_f_frxx_simple, B_UID(247) };
+static const struct builtin B4_vec_any_ge = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:4", "*vcmpgtuw.", CODE_FOR_j_24_f_frxx_simple, B_UID(248) };
+static const struct builtin B5_vec_any_ge = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:5", "*vcmpgtsb.", CODE_FOR_j_24_f_frxx_simple, B_UID(249) };
+static const struct builtin B6_vec_any_ge = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:6", "*vcmpgtub.", CODE_FOR_j_24_f_frxx_simple, B_UID(250) };
+static const struct builtin B7_vec_any_ge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_ge:7", "*vcmpgefp.", CODE_FOR_j_26_f_fxx_simple, B_UID(251) };
+static const struct builtin B8_vec_any_ge = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:8", "*vcmpgtsh.", CODE_FOR_j_24_f_frxx_simple, B_UID(252) };
+static const struct builtin B9_vec_any_ge = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:9", "*vcmpgtsh.", CODE_FOR_j_24_f_frxx_simple, B_UID(253) };
+static const struct builtin B10_vec_any_ge = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:10", "*vcmpgtsw.", CODE_FOR_j_24_f_frxx_simple, B_UID(254) };
+static const struct builtin B11_vec_any_ge = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:11", "*vcmpgtsw.", CODE_FOR_j_24_f_frxx_simple, B_UID(255) };
+static const struct builtin B12_vec_any_ge = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:12", "*vcmpgtsb.", CODE_FOR_j_24_f_frxx_simple, B_UID(256) };
+static const struct builtin B13_vec_any_ge = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:13", "*vcmpgtsb.", CODE_FOR_j_24_f_frxx_simple, B_UID(257) };
+static const struct builtin B14_vec_any_ge = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:14", "*vcmpgtuh.", CODE_FOR_j_24_f_frxx_simple, B_UID(258) };
+static const struct builtin B15_vec_any_ge = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:15", "*vcmpgtuh.", CODE_FOR_j_24_f_frxx_simple, B_UID(259) };
+static const struct builtin B16_vec_any_ge = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:16", "*vcmpgtuw.", CODE_FOR_j_24_f_frxx_simple, B_UID(260) };
+static const struct builtin B17_vec_any_ge = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:17", "*vcmpgtuw.", CODE_FOR_j_24_f_frxx_simple, B_UID(261) };
+static const struct builtin B18_vec_any_ge = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:18", "*vcmpgtub.", CODE_FOR_j_24_f_frxx_simple, B_UID(262) };
+static const struct builtin B19_vec_any_ge = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_ge:19", "*vcmpgtub.", CODE_FOR_j_24_f_frxx_simple, B_UID(263) };
+static const struct builtin B1_vec_any_gt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:1", "*vcmpgtsh.", CODE_FOR_j_26_f_fxx_simple, B_UID(264) };
+static const struct builtin B2_vec_any_gt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:2", "*vcmpgtuh.", CODE_FOR_j_26_f_fxx_simple, B_UID(265) };
+static const struct builtin B3_vec_any_gt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:3", "*vcmpgtsw.", CODE_FOR_j_26_f_fxx_simple, B_UID(266) };
+static const struct builtin B4_vec_any_gt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:4", "*vcmpgtuw.", CODE_FOR_j_26_f_fxx_simple, B_UID(267) };
+static const struct builtin B5_vec_any_gt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:5", "*vcmpgtsb.", CODE_FOR_j_26_f_fxx_simple, B_UID(268) };
+static const struct builtin B6_vec_any_gt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:6", "*vcmpgtub.", CODE_FOR_j_26_f_fxx_simple, B_UID(269) };
+static const struct builtin B7_vec_any_gt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:7", "*vcmpgtfp.", CODE_FOR_j_26_f_fxx_simple, B_UID(270) };
+static const struct builtin B8_vec_any_gt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:8", "*vcmpgtsh.", CODE_FOR_j_26_f_fxx_simple, B_UID(271) };
+static const struct builtin B9_vec_any_gt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:9", "*vcmpgtsh.", CODE_FOR_j_26_f_fxx_simple, B_UID(272) };
+static const struct builtin B10_vec_any_gt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:10", "*vcmpgtsw.", CODE_FOR_j_26_f_fxx_simple, B_UID(273) };
+static const struct builtin B11_vec_any_gt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:11", "*vcmpgtsw.", CODE_FOR_j_26_f_fxx_simple, B_UID(274) };
+static const struct builtin B12_vec_any_gt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:12", "*vcmpgtsb.", CODE_FOR_j_26_f_fxx_simple, B_UID(275) };
+static const struct builtin B13_vec_any_gt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:13", "*vcmpgtsb.", CODE_FOR_j_26_f_fxx_simple, B_UID(276) };
+static const struct builtin B14_vec_any_gt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:14", "*vcmpgtuh.", CODE_FOR_j_26_f_fxx_simple, B_UID(277) };
+static const struct builtin B15_vec_any_gt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:15", "*vcmpgtuh.", CODE_FOR_j_26_f_fxx_simple, B_UID(278) };
+static const struct builtin B16_vec_any_gt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:16", "*vcmpgtuw.", CODE_FOR_j_26_f_fxx_simple, B_UID(279) };
+static const struct builtin B17_vec_any_gt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:17", "*vcmpgtuw.", CODE_FOR_j_26_f_fxx_simple, B_UID(280) };
+static const struct builtin B18_vec_any_gt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:18", "*vcmpgtub.", CODE_FOR_j_26_f_fxx_simple, B_UID(281) };
+static const struct builtin B19_vec_any_gt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_gt:19", "*vcmpgtub.", CODE_FOR_j_26_f_fxx_simple, B_UID(282) };
+static const struct builtin B1_vec_any_le = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:1", "*vcmpgtsh.", CODE_FOR_j_24_f_fxx_simple, B_UID(283) };
+static const struct builtin B2_vec_any_le = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:2", "*vcmpgtuh.", CODE_FOR_j_24_f_fxx_simple, B_UID(284) };
+static const struct builtin B3_vec_any_le = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:3", "*vcmpgtsw.", CODE_FOR_j_24_f_fxx_simple, B_UID(285) };
+static const struct builtin B4_vec_any_le = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:4", "*vcmpgtuw.", CODE_FOR_j_24_f_fxx_simple, B_UID(286) };
+static const struct builtin B5_vec_any_le = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:5", "*vcmpgtsb.", CODE_FOR_j_24_f_fxx_simple, B_UID(287) };
+static const struct builtin B6_vec_any_le = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:6", "*vcmpgtub.", CODE_FOR_j_24_f_fxx_simple, B_UID(288) };
+static const struct builtin B7_vec_any_le = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_le:7", "*vcmpgefp.", CODE_FOR_j_26_f_frxx_simple, B_UID(289) };
+static const struct builtin B8_vec_any_le = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:8", "*vcmpgtsh.", CODE_FOR_j_24_f_fxx_simple, B_UID(290) };
+static const struct builtin B9_vec_any_le = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:9", "*vcmpgtsh.", CODE_FOR_j_24_f_fxx_simple, B_UID(291) };
+static const struct builtin B10_vec_any_le = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:10", "*vcmpgtsw.", CODE_FOR_j_24_f_fxx_simple, B_UID(292) };
+static const struct builtin B11_vec_any_le = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:11", "*vcmpgtsw.", CODE_FOR_j_24_f_fxx_simple, B_UID(293) };
+static const struct builtin B12_vec_any_le = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:12", "*vcmpgtsb.", CODE_FOR_j_24_f_fxx_simple, B_UID(294) };
+static const struct builtin B13_vec_any_le = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:13", "*vcmpgtsb.", CODE_FOR_j_24_f_fxx_simple, B_UID(295) };
+static const struct builtin B14_vec_any_le = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:14", "*vcmpgtuh.", CODE_FOR_j_24_f_fxx_simple, B_UID(296) };
+static const struct builtin B15_vec_any_le = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:15", "*vcmpgtuh.", CODE_FOR_j_24_f_fxx_simple, B_UID(297) };
+static const struct builtin B16_vec_any_le = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:16", "*vcmpgtuw.", CODE_FOR_j_24_f_fxx_simple, B_UID(298) };
+static const struct builtin B17_vec_any_le = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:17", "*vcmpgtuw.", CODE_FOR_j_24_f_fxx_simple, B_UID(299) };
+static const struct builtin B18_vec_any_le = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:18", "*vcmpgtub.", CODE_FOR_j_24_f_fxx_simple, B_UID(300) };
+static const struct builtin B19_vec_any_le = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_le:19", "*vcmpgtub.", CODE_FOR_j_24_f_fxx_simple, B_UID(301) };
+static const struct builtin B1_vec_any_lt = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:1", "*vcmpgtsh.", CODE_FOR_j_26_f_frxx_simple, B_UID(302) };
+static const struct builtin B2_vec_any_lt = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:2", "*vcmpgtuh.", CODE_FOR_j_26_f_frxx_simple, B_UID(303) };
+static const struct builtin B3_vec_any_lt = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:3", "*vcmpgtsw.", CODE_FOR_j_26_f_frxx_simple, B_UID(304) };
+static const struct builtin B4_vec_any_lt = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:4", "*vcmpgtuw.", CODE_FOR_j_26_f_frxx_simple, B_UID(305) };
+static const struct builtin B5_vec_any_lt = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:5", "*vcmpgtsb.", CODE_FOR_j_26_f_frxx_simple, B_UID(306) };
+static const struct builtin B6_vec_any_lt = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:6", "*vcmpgtub.", CODE_FOR_j_26_f_frxx_simple, B_UID(307) };
+static const struct builtin B7_vec_any_lt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:7", "*vcmpgtfp.", CODE_FOR_j_26_f_frxx_simple, B_UID(308) };
+static const struct builtin B8_vec_any_lt = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:8", "*vcmpgtsh.", CODE_FOR_j_26_f_frxx_simple, B_UID(309) };
+static const struct builtin B9_vec_any_lt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:9", "*vcmpgtsh.", CODE_FOR_j_26_f_frxx_simple, B_UID(310) };
+static const struct builtin B10_vec_any_lt = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:10", "*vcmpgtsw.", CODE_FOR_j_26_f_frxx_simple, B_UID(311) };
+static const struct builtin B11_vec_any_lt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:11", "*vcmpgtsw.", CODE_FOR_j_26_f_frxx_simple, B_UID(312) };
+static const struct builtin B12_vec_any_lt = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:12", "*vcmpgtsb.", CODE_FOR_j_26_f_frxx_simple, B_UID(313) };
+static const struct builtin B13_vec_any_lt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:13", "*vcmpgtsb.", CODE_FOR_j_26_f_frxx_simple, B_UID(314) };
+static const struct builtin B14_vec_any_lt = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:14", "*vcmpgtuh.", CODE_FOR_j_26_f_frxx_simple, B_UID(315) };
+static const struct builtin B15_vec_any_lt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:15", "*vcmpgtuh.", CODE_FOR_j_26_f_frxx_simple, B_UID(316) };
+static const struct builtin B16_vec_any_lt = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:16", "*vcmpgtuw.", CODE_FOR_j_26_f_frxx_simple, B_UID(317) };
+static const struct builtin B17_vec_any_lt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:17", "*vcmpgtuw.", CODE_FOR_j_26_f_frxx_simple, B_UID(318) };
+static const struct builtin B18_vec_any_lt = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:18", "*vcmpgtub.", CODE_FOR_j_26_f_frxx_simple, B_UID(319) };
+static const struct builtin B19_vec_any_lt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc26fr, 2, FALSE, FALSE, 0, "vec_any_lt:19", "*vcmpgtub.", CODE_FOR_j_26_f_frxx_simple, B_UID(320) };
+static const struct builtin B_vec_any_nan = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc24fd, 1, FALSE, FALSE, 0, "vec_any_nan", "*vcmpeqfp.", CODE_FOR_j_24_f_fx_simple, B_UID(321) };
+static const struct builtin B1_vec_any_ne = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:1", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(322) };
+static const struct builtin B2_vec_any_ne = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:2", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(323) };
+static const struct builtin B3_vec_any_ne = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:3", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(324) };
+static const struct builtin B4_vec_any_ne = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:4", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(325) };
+static const struct builtin B5_vec_any_ne = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:5", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(326) };
+static const struct builtin B6_vec_any_ne = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:6", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(327) };
+static const struct builtin B7_vec_any_ne = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:7", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(328) };
+static const struct builtin B8_vec_any_ne = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:8", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(329) };
+static const struct builtin B9_vec_any_ne = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:9", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(330) };
+static const struct builtin B10_vec_any_ne = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:10", "*vcmpeqfp.", CODE_FOR_j_24_f_fxx_simple, B_UID(331) };
+static const struct builtin B11_vec_any_ne = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:11", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(332) };
+static const struct builtin B12_vec_any_ne = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:12", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(333) };
+static const struct builtin B13_vec_any_ne = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:13", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(334) };
+static const struct builtin B14_vec_any_ne = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:14", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(335) };
+static const struct builtin B15_vec_any_ne = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:15", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(336) };
+static const struct builtin B16_vec_any_ne = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:16", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(337) };
+static const struct builtin B17_vec_any_ne = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:17", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(338) };
+static const struct builtin B18_vec_any_ne = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:18", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(339) };
+static const struct builtin B19_vec_any_ne = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:19", "*vcmpequh.", CODE_FOR_j_24_f_fxx_simple, B_UID(340) };
+static const struct builtin B20_vec_any_ne = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:20", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(341) };
+static const struct builtin B21_vec_any_ne = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:21", "*vcmpequw.", CODE_FOR_j_24_f_fxx_simple, B_UID(342) };
+static const struct builtin B22_vec_any_ne = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:22", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(343) };
+static const struct builtin B23_vec_any_ne = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ne:23", "*vcmpequb.", CODE_FOR_j_24_f_fxx_simple, B_UID(344) };
+static const struct builtin B_vec_any_nge = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_nge", "*vcmpgefp.", CODE_FOR_j_24_f_fxx_simple, B_UID(345) };
+static const struct builtin B_vec_any_ngt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24f, 2, FALSE, FALSE, 0, "vec_any_ngt", "*vcmpgtfp.", CODE_FOR_j_24_f_fxx_simple, B_UID(346) };
+static const struct builtin B_vec_any_nle = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_nle", "*vcmpgefp.", CODE_FOR_j_24_f_frxx_simple, B_UID(347) };
+static const struct builtin B_vec_any_nlt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc24fr, 2, FALSE, FALSE, 0, "vec_any_nlt", "*vcmpgtfp.", CODE_FOR_j_24_f_frxx_simple, B_UID(348) };
+static const struct builtin B_vec_any_numeric = { { &T_vec_f32, NULL, NULL, }, "x", &T_cc26fd, 1, FALSE, FALSE, 0, "vec_any_numeric", "*vcmpeqfp.", CODE_FOR_j_26_f_fx_simple, B_UID(349) };
+static const struct builtin B_vec_any_out = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_cc26f, 2, FALSE, FALSE, 0, "vec_any_out", "*vcmpbfp.", CODE_FOR_j_26_f_fxx_simple, B_UID(350) };
+static const struct builtin B_vec_vavgsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vavgsh", "*vavgsh", CODE_FOR_xfxx_simple, B_UID(351) };
+static const struct builtin B_vec_vavgsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vavgsw", "*vavgsw", CODE_FOR_xfxx_simple, B_UID(352) };
+static const struct builtin B_vec_vavgsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vavgsb", "*vavgsb", CODE_FOR_xfxx_simple, B_UID(353) };
+static const struct builtin B_vec_vavguh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vavguh", "*vavguh", CODE_FOR_xfxx_simple, B_UID(354) };
+static const struct builtin B_vec_vavguw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vavguw", "*vavguw", CODE_FOR_xfxx_simple, B_UID(355) };
+static const struct builtin B_vec_vavgub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vavgub", "*vavgub", CODE_FOR_xfxx_simple, B_UID(356) };
+static const struct builtin B_vec_vrfip = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfip", "*vrfip", CODE_FOR_xfx_fp, B_UID(357) };
+static const struct builtin B_vec_vcmpbfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vcmpbfp", "*vcmpbfp", CODE_FOR_xfxx_simple, B_UID(358) };
+static const struct builtin B_vec_vcmpeqfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 7, "vec_vcmpeqfp", "*vcmpeqfp", CODE_FOR_xfxx_simple, B_UID(359) };
+static const struct builtin B1_vec_vcmpequh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 7, "vec_vcmpequh:1", "*vcmpequh", CODE_FOR_xfxx_simple, B_UID(360) };
+static const struct builtin B1_vec_vcmpequw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 7, "vec_vcmpequw:1", "*vcmpequw", CODE_FOR_xfxx_simple, B_UID(361) };
+static const struct builtin B1_vec_vcmpequb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 7, "vec_vcmpequb:1", "*vcmpequb", CODE_FOR_xfxx_simple, B_UID(362) };
+static const struct builtin B2_vec_vcmpequh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 7, "vec_vcmpequh:2", "*vcmpequh", CODE_FOR_xfxx_simple, B_UID(363) };
+static const struct builtin B2_vec_vcmpequw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 7, "vec_vcmpequw:2", "*vcmpequw", CODE_FOR_xfxx_simple, B_UID(364) };
+static const struct builtin B2_vec_vcmpequb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 7, "vec_vcmpequb:2", "*vcmpequb", CODE_FOR_xfxx_simple, B_UID(365) };
+static const struct builtin B_vec_vcmpgefp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgefp", "*vcmpgefp", CODE_FOR_xfxx_simple, B_UID(366) };
+static const struct builtin B_vec_vcmpgtfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgtfp", "*vcmpgtfp", CODE_FOR_xfxx_simple, B_UID(367) };
+static const struct builtin B_vec_vcmpgtsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vcmpgtsh", "*vcmpgtsh", CODE_FOR_xfxx_simple, B_UID(368) };
+static const struct builtin B_vec_vcmpgtsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgtsw", "*vcmpgtsw", CODE_FOR_xfxx_simple, B_UID(369) };
+static const struct builtin B_vec_vcmpgtsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vcmpgtsb", "*vcmpgtsb", CODE_FOR_xfxx_simple, B_UID(370) };
+static const struct builtin B_vec_vcmpgtuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vcmpgtuh", "*vcmpgtuh", CODE_FOR_xfxx_simple, B_UID(371) };
+static const struct builtin B_vec_vcmpgtuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vcmpgtuw", "*vcmpgtuw", CODE_FOR_xfxx_simple, B_UID(372) };
+static const struct builtin B_vec_vcmpgtub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vcmpgtub", "*vcmpgtub", CODE_FOR_xfxx_simple, B_UID(373) };
+static const struct builtin B_vec_cmple = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmple", "*vcmpgefp", CODE_FOR_xfxx_simple, B_UID(374) };
+static const struct builtin B1_vec_cmplt = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmplt:1", "*vcmpgtfp", CODE_FOR_xfxx_simple, B_UID(375) };
+static const struct builtin B2_vec_cmplt = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 10, "vec_cmplt:2", "*vcmpgtsh", CODE_FOR_xfxx_simple, B_UID(376) };
+static const struct builtin B3_vec_cmplt = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmplt:3", "*vcmpgtsw", CODE_FOR_xfxx_simple, B_UID(377) };
+static const struct builtin B4_vec_cmplt = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 10, "vec_cmplt:4", "*vcmpgtsb", CODE_FOR_xfxx_simple, B_UID(378) };
+static const struct builtin B5_vec_cmplt = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 10, "vec_cmplt:5", "*vcmpgtuh", CODE_FOR_xfxx_simple, B_UID(379) };
+static const struct builtin B6_vec_cmplt = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 10, "vec_cmplt:6", "*vcmpgtuw", CODE_FOR_xfxx_simple, B_UID(380) };
+static const struct builtin B7_vec_cmplt = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 10, "vec_cmplt:7", "*vcmpgtub", CODE_FOR_xfxx_simple, B_UID(381) };
+static const struct builtin B_vec_vcfsx = { { &T_vec_s32, &T_immed_u5, NULL, }, "xB", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vcfsx", "*vcfsx", CODE_FOR_xfxB_fp, B_UID(382) };
+static const struct builtin B_vec_vcfux = { { &T_vec_u32, &T_immed_u5, NULL, }, "xB", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vcfux", "*vcfux", CODE_FOR_xfxB_fp, B_UID(383) };
+static const struct builtin B_vec_vctsxs = { { &T_vec_f32, &T_immed_u5, NULL, }, "xB", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vctsxs", "*vctsxs", CODE_FOR_xfxB_fp, B_UID(384) };
+static const struct builtin B_vec_vctuxs = { { &T_vec_f32, &T_immed_u5, NULL, }, "xB", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vctuxs", "*vctuxs", CODE_FOR_xfxB_fp, B_UID(385) };
+static const struct builtin B_vec_dss = { { &T_immed_u2, NULL, NULL, }, "D", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_dss", "*dss", CODE_FOR_vlfD_load, B_UID(386) };
+static const struct builtin B_vec_dssall = { { NULL, NULL, NULL, }, "", &T_volatile_void, 0, FALSE, FALSE, 0, "vec_dssall", "*dssall", CODE_FOR_vlf_load, B_UID(387) };
+static const struct builtin B1_vec_dst = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:1", "*dst", CODE_FOR_vlfiiD_load, B_UID(388) };
+static const struct builtin B2_vec_dst = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:2", "*dst", CODE_FOR_vlfiiD_load, B_UID(389) };
+static const struct builtin B3_vec_dst = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:3", "*dst", CODE_FOR_vlfiiD_load, B_UID(390) };
+static const struct builtin B4_vec_dst = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:4", "*dst", CODE_FOR_vlfiiD_load, B_UID(391) };
+static const struct builtin B5_vec_dst = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:5", "*dst", CODE_FOR_vlfiiD_load, B_UID(392) };
+static const struct builtin B6_vec_dst = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:6", "*dst", CODE_FOR_vlfiiD_load, B_UID(393) };
+static const struct builtin B7_vec_dst = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:7", "*dst", CODE_FOR_vlfiiD_load, B_UID(394) };
+static const struct builtin B8_vec_dst = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:8", "*dst", CODE_FOR_vlfiiD_load, B_UID(395) };
+static const struct builtin B9_vec_dst = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:9", "*dst", CODE_FOR_vlfiiD_load, B_UID(396) };
+static const struct builtin B10_vec_dst = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:10", "*dst", CODE_FOR_vlfiiD_load, B_UID(397) };
+static const struct builtin B11_vec_dst = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:11", "*dst", CODE_FOR_vlfiiD_load, B_UID(398) };
+static const struct builtin B12_vec_dst = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:12", "*dst", CODE_FOR_vlfiiD_load, B_UID(399) };
+static const struct builtin B13_vec_dst = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:13", "*dst", CODE_FOR_vlfiiD_load, B_UID(400) };
+static const struct builtin B14_vec_dst = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:14", "*dst", CODE_FOR_vlfiiD_load, B_UID(401) };
+static const struct builtin B15_vec_dst = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:15", "*dst", CODE_FOR_vlfiiD_load, B_UID(402) };
+static const struct builtin B16_vec_dst = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:16", "*dst", CODE_FOR_vlfiiD_load, B_UID(403) };
+static const struct builtin B17_vec_dst = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:17", "*dst", CODE_FOR_vlfiiD_load, B_UID(404) };
+static const struct builtin B18_vec_dst = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:18", "*dst", CODE_FOR_vlfiiD_load, B_UID(405) };
+static const struct builtin B19_vec_dst = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:19", "*dst", CODE_FOR_vlfiiD_load, B_UID(406) };
+static const struct builtin B20_vec_dst = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dst:20", "*dst", CODE_FOR_vlfiiD_load, B_UID(407) };
+static const struct builtin B1_vec_dstst = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:1", "*dstst", CODE_FOR_vlfiiD_load, B_UID(408) };
+static const struct builtin B2_vec_dstst = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:2", "*dstst", CODE_FOR_vlfiiD_load, B_UID(409) };
+static const struct builtin B3_vec_dstst = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:3", "*dstst", CODE_FOR_vlfiiD_load, B_UID(410) };
+static const struct builtin B4_vec_dstst = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:4", "*dstst", CODE_FOR_vlfiiD_load, B_UID(411) };
+static const struct builtin B5_vec_dstst = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:5", "*dstst", CODE_FOR_vlfiiD_load, B_UID(412) };
+static const struct builtin B6_vec_dstst = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:6", "*dstst", CODE_FOR_vlfiiD_load, B_UID(413) };
+static const struct builtin B7_vec_dstst = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:7", "*dstst", CODE_FOR_vlfiiD_load, B_UID(414) };
+static const struct builtin B8_vec_dstst = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:8", "*dstst", CODE_FOR_vlfiiD_load, B_UID(415) };
+static const struct builtin B9_vec_dstst = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:9", "*dstst", CODE_FOR_vlfiiD_load, B_UID(416) };
+static const struct builtin B10_vec_dstst = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:10", "*dstst", CODE_FOR_vlfiiD_load, B_UID(417) };
+static const struct builtin B11_vec_dstst = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:11", "*dstst", CODE_FOR_vlfiiD_load, B_UID(418) };
+static const struct builtin B12_vec_dstst = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:12", "*dstst", CODE_FOR_vlfiiD_load, B_UID(419) };
+static const struct builtin B13_vec_dstst = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:13", "*dstst", CODE_FOR_vlfiiD_load, B_UID(420) };
+static const struct builtin B14_vec_dstst = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:14", "*dstst", CODE_FOR_vlfiiD_load, B_UID(421) };
+static const struct builtin B15_vec_dstst = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:15", "*dstst", CODE_FOR_vlfiiD_load, B_UID(422) };
+static const struct builtin B16_vec_dstst = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:16", "*dstst", CODE_FOR_vlfiiD_load, B_UID(423) };
+static const struct builtin B17_vec_dstst = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:17", "*dstst", CODE_FOR_vlfiiD_load, B_UID(424) };
+static const struct builtin B18_vec_dstst = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:18", "*dstst", CODE_FOR_vlfiiD_load, B_UID(425) };
+static const struct builtin B19_vec_dstst = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:19", "*dstst", CODE_FOR_vlfiiD_load, B_UID(426) };
+static const struct builtin B20_vec_dstst = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstst:20", "*dstst", CODE_FOR_vlfiiD_load, B_UID(427) };
+static const struct builtin B1_vec_dststt = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:1", "*dststt", CODE_FOR_vlfiiD_load, B_UID(428) };
+static const struct builtin B2_vec_dststt = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:2", "*dststt", CODE_FOR_vlfiiD_load, B_UID(429) };
+static const struct builtin B3_vec_dststt = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:3", "*dststt", CODE_FOR_vlfiiD_load, B_UID(430) };
+static const struct builtin B4_vec_dststt = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:4", "*dststt", CODE_FOR_vlfiiD_load, B_UID(431) };
+static const struct builtin B5_vec_dststt = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:5", "*dststt", CODE_FOR_vlfiiD_load, B_UID(432) };
+static const struct builtin B6_vec_dststt = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:6", "*dststt", CODE_FOR_vlfiiD_load, B_UID(433) };
+static const struct builtin B7_vec_dststt = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:7", "*dststt", CODE_FOR_vlfiiD_load, B_UID(434) };
+static const struct builtin B8_vec_dststt = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:8", "*dststt", CODE_FOR_vlfiiD_load, B_UID(435) };
+static const struct builtin B9_vec_dststt = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:9", "*dststt", CODE_FOR_vlfiiD_load, B_UID(436) };
+static const struct builtin B10_vec_dststt = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:10", "*dststt", CODE_FOR_vlfiiD_load, B_UID(437) };
+static const struct builtin B11_vec_dststt = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:11", "*dststt", CODE_FOR_vlfiiD_load, B_UID(438) };
+static const struct builtin B12_vec_dststt = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:12", "*dststt", CODE_FOR_vlfiiD_load, B_UID(439) };
+static const struct builtin B13_vec_dststt = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:13", "*dststt", CODE_FOR_vlfiiD_load, B_UID(440) };
+static const struct builtin B14_vec_dststt = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:14", "*dststt", CODE_FOR_vlfiiD_load, B_UID(441) };
+static const struct builtin B15_vec_dststt = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:15", "*dststt", CODE_FOR_vlfiiD_load, B_UID(442) };
+static const struct builtin B16_vec_dststt = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:16", "*dststt", CODE_FOR_vlfiiD_load, B_UID(443) };
+static const struct builtin B17_vec_dststt = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:17", "*dststt", CODE_FOR_vlfiiD_load, B_UID(444) };
+static const struct builtin B18_vec_dststt = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:18", "*dststt", CODE_FOR_vlfiiD_load, B_UID(445) };
+static const struct builtin B19_vec_dststt = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:19", "*dststt", CODE_FOR_vlfiiD_load, B_UID(446) };
+static const struct builtin B20_vec_dststt = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dststt:20", "*dststt", CODE_FOR_vlfiiD_load, B_UID(447) };
+static const struct builtin B1_vec_dstt = { { &T_const_float_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:1", "*dstt", CODE_FOR_vlfiiD_load, B_UID(448) };
+static const struct builtin B2_vec_dstt = { { &T_const_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:2", "*dstt", CODE_FOR_vlfiiD_load, B_UID(449) };
+static const struct builtin B3_vec_dstt = { { &T_const_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:3", "*dstt", CODE_FOR_vlfiiD_load, B_UID(450) };
+static const struct builtin B4_vec_dstt = { { &T_const_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:4", "*dstt", CODE_FOR_vlfiiD_load, B_UID(451) };
+static const struct builtin B5_vec_dstt = { { &T_const_signed_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:5", "*dstt", CODE_FOR_vlfiiD_load, B_UID(452) };
+static const struct builtin B6_vec_dstt = { { &T_const_unsigned_char_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:6", "*dstt", CODE_FOR_vlfiiD_load, B_UID(453) };
+static const struct builtin B7_vec_dstt = { { &T_const_unsigned_int_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:7", "*dstt", CODE_FOR_vlfiiD_load, B_UID(454) };
+static const struct builtin B8_vec_dstt = { { &T_const_unsigned_long_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:8", "*dstt", CODE_FOR_vlfiiD_load, B_UID(455) };
+static const struct builtin B9_vec_dstt = { { &T_const_unsigned_short_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:9", "*dstt", CODE_FOR_vlfiiD_load, B_UID(456) };
+static const struct builtin B10_vec_dstt = { { &T_const_vec_b16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:10", "*dstt", CODE_FOR_vlfiiD_load, B_UID(457) };
+static const struct builtin B11_vec_dstt = { { &T_const_vec_b32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:11", "*dstt", CODE_FOR_vlfiiD_load, B_UID(458) };
+static const struct builtin B12_vec_dstt = { { &T_const_vec_b8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:12", "*dstt", CODE_FOR_vlfiiD_load, B_UID(459) };
+static const struct builtin B13_vec_dstt = { { &T_const_vec_f32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:13", "*dstt", CODE_FOR_vlfiiD_load, B_UID(460) };
+static const struct builtin B14_vec_dstt = { { &T_const_vec_p16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:14", "*dstt", CODE_FOR_vlfiiD_load, B_UID(461) };
+static const struct builtin B15_vec_dstt = { { &T_const_vec_s16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:15", "*dstt", CODE_FOR_vlfiiD_load, B_UID(462) };
+static const struct builtin B16_vec_dstt = { { &T_const_vec_s32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:16", "*dstt", CODE_FOR_vlfiiD_load, B_UID(463) };
+static const struct builtin B17_vec_dstt = { { &T_const_vec_s8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:17", "*dstt", CODE_FOR_vlfiiD_load, B_UID(464) };
+static const struct builtin B18_vec_dstt = { { &T_const_vec_u16_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:18", "*dstt", CODE_FOR_vlfiiD_load, B_UID(465) };
+static const struct builtin B19_vec_dstt = { { &T_const_vec_u32_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:19", "*dstt", CODE_FOR_vlfiiD_load, B_UID(466) };
+static const struct builtin B20_vec_dstt = { { &T_const_vec_u8_ptr, &T_int, &T_immed_u2, }, "iiD", &T_volatile_void, 3, TRUE, FALSE, 0, "vec_dstt:20", "*dstt", CODE_FOR_vlfiiD_load, B_UID(467) };
+static const struct builtin B_vec_vexptefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vexptefp", "*vexptefp", CODE_FOR_xfx_fp, B_UID(468) };
+static const struct builtin B_vec_vrfim = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfim", "*vrfim", CODE_FOR_xfx_fp, B_UID(469) };
+static const struct builtin B1_vec_lvx = { { &T_int, &T_const_float_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvx:1", "*lvx", CODE_FOR_xlfii_load, B_UID(470) };
+static const struct builtin B2_vec_lvx = { { &T_int, &T_const_int_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvx:2", "*lvx", CODE_FOR_xlfii_load, B_UID(471) };
+static const struct builtin B3_vec_lvx = { { &T_int, &T_const_long_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvx:3", "*lvx", CODE_FOR_xlfii_load, B_UID(472) };
+static const struct builtin B4_vec_lvx = { { &T_int, &T_const_short_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvx:4", "*lvx", CODE_FOR_xlfii_load, B_UID(473) };
+static const struct builtin B5_vec_lvx = { { &T_int, &T_const_signed_char_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvx:5", "*lvx", CODE_FOR_xlfii_load, B_UID(474) };
+static const struct builtin B6_vec_lvx = { { &T_int, &T_const_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvx:6", "*lvx", CODE_FOR_xlfii_load, B_UID(475) };
+static const struct builtin B7_vec_lvx = { { &T_int, &T_const_unsigned_int_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvx:7", "*lvx", CODE_FOR_xlfii_load, B_UID(476) };
+static const struct builtin B8_vec_lvx = { { &T_int, &T_const_unsigned_long_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvx:8", "*lvx", CODE_FOR_xlfii_load, B_UID(477) };
+static const struct builtin B9_vec_lvx = { { &T_int, &T_const_unsigned_short_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvx:9", "*lvx", CODE_FOR_xlfii_load, B_UID(478) };
+static const struct builtin B10_vec_lvx = { { &T_int, &T_const_vec_b16_ptr, NULL, }, "ii", &T_vec_b16, 2, TRUE, FALSE, 0, "vec_lvx:10", "*lvx", CODE_FOR_xlfii_load, B_UID(479) };
+static const struct builtin B11_vec_lvx = { { &T_int, &T_const_vec_b32_ptr, NULL, }, "ii", &T_vec_b32, 2, TRUE, FALSE, 0, "vec_lvx:11", "*lvx", CODE_FOR_xlfii_load, B_UID(480) };
+static const struct builtin B12_vec_lvx = { { &T_int, &T_const_vec_b8_ptr, NULL, }, "ii", &T_vec_b8, 2, TRUE, FALSE, 0, "vec_lvx:12", "*lvx", CODE_FOR_xlfii_load, B_UID(481) };
+static const struct builtin B13_vec_lvx = { { &T_int, &T_const_vec_f32_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvx:13", "*lvx", CODE_FOR_xlfii_load, B_UID(482) };
+static const struct builtin B14_vec_lvx = { { &T_int, &T_const_vec_p16_ptr, NULL, }, "ii", &T_vec_p16, 2, TRUE, FALSE, 0, "vec_lvx:14", "*lvx", CODE_FOR_xlfii_load, B_UID(483) };
+static const struct builtin B15_vec_lvx = { { &T_int, &T_const_vec_s16_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvx:15", "*lvx", CODE_FOR_xlfii_load, B_UID(484) };
+static const struct builtin B16_vec_lvx = { { &T_int, &T_const_vec_s32_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvx:16", "*lvx", CODE_FOR_xlfii_load, B_UID(485) };
+static const struct builtin B17_vec_lvx = { { &T_int, &T_const_vec_s8_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvx:17", "*lvx", CODE_FOR_xlfii_load, B_UID(486) };
+static const struct builtin B18_vec_lvx = { { &T_int, &T_const_vec_u16_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvx:18", "*lvx", CODE_FOR_xlfii_load, B_UID(487) };
+static const struct builtin B19_vec_lvx = { { &T_int, &T_const_vec_u32_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvx:19", "*lvx", CODE_FOR_xlfii_load, B_UID(488) };
+static const struct builtin B20_vec_lvx = { { &T_int, &T_const_vec_u8_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvx:20", "*lvx", CODE_FOR_xlfii_load, B_UID(489) };
+static const struct builtin B1_vec_lvewx = { { &T_int, &T_const_float_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvewx:1", "*lvewx", CODE_FOR_xlfii_load, B_UID(490) };
+static const struct builtin B2_vec_lvewx = { { &T_int, &T_const_int_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvewx:2", "*lvewx", CODE_FOR_xlfii_load, B_UID(491) };
+static const struct builtin B3_vec_lvewx = { { &T_int, &T_const_long_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvewx:3", "*lvewx", CODE_FOR_xlfii_load, B_UID(492) };
+static const struct builtin B1_vec_lvehx = { { &T_int, &T_const_short_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvehx:1", "*lvehx", CODE_FOR_xlfii_load, B_UID(493) };
+static const struct builtin B1_vec_lvebx = { { &T_int, &T_const_signed_char_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvebx:1", "*lvebx", CODE_FOR_xlfii_load, B_UID(494) };
+static const struct builtin B2_vec_lvebx = { { &T_int, &T_const_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvebx:2", "*lvebx", CODE_FOR_xlfii_load, B_UID(495) };
+static const struct builtin B4_vec_lvewx = { { &T_int, &T_const_unsigned_int_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvewx:4", "*lvewx", CODE_FOR_xlfii_load, B_UID(496) };
+static const struct builtin B5_vec_lvewx = { { &T_int, &T_const_unsigned_long_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvewx:5", "*lvewx", CODE_FOR_xlfii_load, B_UID(497) };
+static const struct builtin B2_vec_lvehx = { { &T_int, &T_const_unsigned_short_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvehx:2", "*lvehx", CODE_FOR_xlfii_load, B_UID(498) };
+static const struct builtin B1_vec_lvxl = { { &T_int, &T_const_float_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvxl:1", "*lvxl", CODE_FOR_xlfii_load, B_UID(499) };
+static const struct builtin B2_vec_lvxl = { { &T_int, &T_const_int_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvxl:2", "*lvxl", CODE_FOR_xlfii_load, B_UID(500) };
+static const struct builtin B3_vec_lvxl = { { &T_int, &T_const_long_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvxl:3", "*lvxl", CODE_FOR_xlfii_load, B_UID(501) };
+static const struct builtin B4_vec_lvxl = { { &T_int, &T_const_short_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvxl:4", "*lvxl", CODE_FOR_xlfii_load, B_UID(502) };
+static const struct builtin B5_vec_lvxl = { { &T_int, &T_const_signed_char_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvxl:5", "*lvxl", CODE_FOR_xlfii_load, B_UID(503) };
+static const struct builtin B6_vec_lvxl = { { &T_int, &T_const_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvxl:6", "*lvxl", CODE_FOR_xlfii_load, B_UID(504) };
+static const struct builtin B7_vec_lvxl = { { &T_int, &T_const_unsigned_int_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvxl:7", "*lvxl", CODE_FOR_xlfii_load, B_UID(505) };
+static const struct builtin B8_vec_lvxl = { { &T_int, &T_const_unsigned_long_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvxl:8", "*lvxl", CODE_FOR_xlfii_load, B_UID(506) };
+static const struct builtin B9_vec_lvxl = { { &T_int, &T_const_unsigned_short_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvxl:9", "*lvxl", CODE_FOR_xlfii_load, B_UID(507) };
+static const struct builtin B10_vec_lvxl = { { &T_int, &T_const_vec_b16_ptr, NULL, }, "ii", &T_vec_b16, 2, TRUE, FALSE, 0, "vec_lvxl:10", "*lvxl", CODE_FOR_xlfii_load, B_UID(508) };
+static const struct builtin B11_vec_lvxl = { { &T_int, &T_const_vec_b32_ptr, NULL, }, "ii", &T_vec_b32, 2, TRUE, FALSE, 0, "vec_lvxl:11", "*lvxl", CODE_FOR_xlfii_load, B_UID(509) };
+static const struct builtin B12_vec_lvxl = { { &T_int, &T_const_vec_b8_ptr, NULL, }, "ii", &T_vec_b8, 2, TRUE, FALSE, 0, "vec_lvxl:12", "*lvxl", CODE_FOR_xlfii_load, B_UID(510) };
+static const struct builtin B13_vec_lvxl = { { &T_int, &T_const_vec_f32_ptr, NULL, }, "ii", &T_vec_f32, 2, TRUE, FALSE, 0, "vec_lvxl:13", "*lvxl", CODE_FOR_xlfii_load, B_UID(511) };
+static const struct builtin B14_vec_lvxl = { { &T_int, &T_const_vec_p16_ptr, NULL, }, "ii", &T_vec_p16, 2, TRUE, FALSE, 0, "vec_lvxl:14", "*lvxl", CODE_FOR_xlfii_load, B_UID(512) };
+static const struct builtin B15_vec_lvxl = { { &T_int, &T_const_vec_s16_ptr, NULL, }, "ii", &T_vec_s16, 2, TRUE, FALSE, 0, "vec_lvxl:15", "*lvxl", CODE_FOR_xlfii_load, B_UID(513) };
+static const struct builtin B16_vec_lvxl = { { &T_int, &T_const_vec_s32_ptr, NULL, }, "ii", &T_vec_s32, 2, TRUE, FALSE, 0, "vec_lvxl:16", "*lvxl", CODE_FOR_xlfii_load, B_UID(514) };
+static const struct builtin B17_vec_lvxl = { { &T_int, &T_const_vec_s8_ptr, NULL, }, "ii", &T_vec_s8, 2, TRUE, FALSE, 0, "vec_lvxl:17", "*lvxl", CODE_FOR_xlfii_load, B_UID(515) };
+static const struct builtin B18_vec_lvxl = { { &T_int, &T_const_vec_u16_ptr, NULL, }, "ii", &T_vec_u16, 2, TRUE, FALSE, 0, "vec_lvxl:18", "*lvxl", CODE_FOR_xlfii_load, B_UID(516) };
+static const struct builtin B19_vec_lvxl = { { &T_int, &T_const_vec_u32_ptr, NULL, }, "ii", &T_vec_u32, 2, TRUE, FALSE, 0, "vec_lvxl:19", "*lvxl", CODE_FOR_xlfii_load, B_UID(517) };
+static const struct builtin B20_vec_lvxl = { { &T_int, &T_const_vec_u8_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, FALSE, 0, "vec_lvxl:20", "*lvxl", CODE_FOR_xlfii_load, B_UID(518) };
+static const struct builtin B_vec_vlogefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vlogefp", "*vlogefp", CODE_FOR_xfx_fp, B_UID(519) };
+static const struct builtin B1_vec_lvsl = { { &T_int, &T_const_volatile_float_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:1", "*lvsl", CODE_FOR_xfii_load, B_UID(520) };
+static const struct builtin B2_vec_lvsl = { { &T_int, &T_const_volatile_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:2", "*lvsl", CODE_FOR_xfii_load, B_UID(521) };
+static const struct builtin B3_vec_lvsl = { { &T_int, &T_const_volatile_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:3", "*lvsl", CODE_FOR_xfii_load, B_UID(522) };
+static const struct builtin B4_vec_lvsl = { { &T_int, &T_const_volatile_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:4", "*lvsl", CODE_FOR_xfii_load, B_UID(523) };
+static const struct builtin B5_vec_lvsl = { { &T_int, &T_const_volatile_signed_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:5", "*lvsl", CODE_FOR_xfii_load, B_UID(524) };
+static const struct builtin B6_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:6", "*lvsl", CODE_FOR_xfii_load, B_UID(525) };
+static const struct builtin B7_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:7", "*lvsl", CODE_FOR_xfii_load, B_UID(526) };
+static const struct builtin B8_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:8", "*lvsl", CODE_FOR_xfii_load, B_UID(527) };
+static const struct builtin B9_vec_lvsl = { { &T_int, &T_const_volatile_unsigned_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 8, "vec_lvsl:9", "*lvsl", CODE_FOR_xfii_load, B_UID(528) };
+static const struct builtin B1_vec_lvsr = { { &T_int, &T_const_volatile_float_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:1", "*lvsr", CODE_FOR_xfii_load, B_UID(529) };
+static const struct builtin B2_vec_lvsr = { { &T_int, &T_const_volatile_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:2", "*lvsr", CODE_FOR_xfii_load, B_UID(530) };
+static const struct builtin B3_vec_lvsr = { { &T_int, &T_const_volatile_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:3", "*lvsr", CODE_FOR_xfii_load, B_UID(531) };
+static const struct builtin B4_vec_lvsr = { { &T_int, &T_const_volatile_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:4", "*lvsr", CODE_FOR_xfii_load, B_UID(532) };
+static const struct builtin B5_vec_lvsr = { { &T_int, &T_const_volatile_signed_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:5", "*lvsr", CODE_FOR_xfii_load, B_UID(533) };
+static const struct builtin B6_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_char_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:6", "*lvsr", CODE_FOR_xfii_load, B_UID(534) };
+static const struct builtin B7_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_int_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:7", "*lvsr", CODE_FOR_xfii_load, B_UID(535) };
+static const struct builtin B8_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_long_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:8", "*lvsr", CODE_FOR_xfii_load, B_UID(536) };
+static const struct builtin B9_vec_lvsr = { { &T_int, &T_const_volatile_unsigned_short_ptr, NULL, }, "ii", &T_vec_u8, 2, TRUE, TRUE, 9, "vec_lvsr:9", "*lvsr", CODE_FOR_xfii_load, B_UID(537) };
+static const struct builtin B_vec_vmaddfp = { { &T_vec_f32, &T_vec_f32, &T_vec_f32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vmaddfp", "*vmaddfp", CODE_FOR_xfxxx_fp, B_UID(538) };
+static const struct builtin B_vec_vmhaddshs = { { &T_vec_s16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmhaddshs", "*vmhaddshs", CODE_FOR_xfxxx_complex, B_UID(539) };
+static const struct builtin B1_vec_vmaxsh = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vmaxsh:1", "*vmaxsh", CODE_FOR_xfxx_simple, B_UID(540) };
+static const struct builtin B1_vec_vmaxuh = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vmaxuh:1", "*vmaxuh", CODE_FOR_xfxx_simple, B_UID(541) };
+static const struct builtin B1_vec_vmaxsw = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vmaxsw:1", "*vmaxsw", CODE_FOR_xfxx_simple, B_UID(542) };
+static const struct builtin B1_vec_vmaxuw = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vmaxuw:1", "*vmaxuw", CODE_FOR_xfxx_simple, B_UID(543) };
+static const struct builtin B1_vec_vmaxsb = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vmaxsb:1", "*vmaxsb", CODE_FOR_xfxx_simple, B_UID(544) };
+static const struct builtin B1_vec_vmaxub = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vmaxub:1", "*vmaxub", CODE_FOR_xfxx_simple, B_UID(545) };
+static const struct builtin B_vec_vmaxfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vmaxfp", "*vmaxfp", CODE_FOR_xfxx_simple, B_UID(546) };
+static const struct builtin B2_vec_vmaxsh = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vmaxsh:2", "*vmaxsh", CODE_FOR_xfxx_simple, B_UID(547) };
+static const struct builtin B3_vec_vmaxsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vmaxsh:3", "*vmaxsh", CODE_FOR_xfxx_simple, B_UID(548) };
+static const struct builtin B2_vec_vmaxsw = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vmaxsw:2", "*vmaxsw", CODE_FOR_xfxx_simple, B_UID(549) };
+static const struct builtin B3_vec_vmaxsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vmaxsw:3", "*vmaxsw", CODE_FOR_xfxx_simple, B_UID(550) };
+static const struct builtin B2_vec_vmaxsb = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vmaxsb:2", "*vmaxsb", CODE_FOR_xfxx_simple, B_UID(551) };
+static const struct builtin B3_vec_vmaxsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vmaxsb:3", "*vmaxsb", CODE_FOR_xfxx_simple, B_UID(552) };
+static const struct builtin B2_vec_vmaxuh = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vmaxuh:2", "*vmaxuh", CODE_FOR_xfxx_simple, B_UID(553) };
+static const struct builtin B3_vec_vmaxuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vmaxuh:3", "*vmaxuh", CODE_FOR_xfxx_simple, B_UID(554) };
+static const struct builtin B2_vec_vmaxuw = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vmaxuw:2", "*vmaxuw", CODE_FOR_xfxx_simple, B_UID(555) };
+static const struct builtin B3_vec_vmaxuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vmaxuw:3", "*vmaxuw", CODE_FOR_xfxx_simple, B_UID(556) };
+static const struct builtin B2_vec_vmaxub = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vmaxub:2", "*vmaxub", CODE_FOR_xfxx_simple, B_UID(557) };
+static const struct builtin B3_vec_vmaxub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vmaxub:3", "*vmaxub", CODE_FOR_xfxx_simple, B_UID(558) };
+static const struct builtin B1_vec_vmrghh = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vmrghh:1", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(559) };
+static const struct builtin B1_vec_vmrghw = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vmrghw:1", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(560) };
+static const struct builtin B1_vec_vmrghb = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vmrghb:1", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(561) };
+static const struct builtin B2_vec_vmrghw = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vmrghw:2", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(562) };
+static const struct builtin B2_vec_vmrghh = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vmrghh:2", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(563) };
+static const struct builtin B3_vec_vmrghh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmrghh:3", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(564) };
+static const struct builtin B3_vec_vmrghw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmrghw:3", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(565) };
+static const struct builtin B2_vec_vmrghb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vmrghb:2", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(566) };
+static const struct builtin B4_vec_vmrghh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmrghh:4", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(567) };
+static const struct builtin B4_vec_vmrghw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmrghw:4", "*vmrghw", CODE_FOR_xfxx_perm, B_UID(568) };
+static const struct builtin B3_vec_vmrghb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vmrghb:3", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(569) };
+static const struct builtin B1_vec_vmrglh = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vmrglh:1", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(570) };
+static const struct builtin B1_vec_vmrglw = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vmrglw:1", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(571) };
+static const struct builtin B1_vec_vmrglb = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vmrglb:1", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(572) };
+static const struct builtin B2_vec_vmrglw = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vmrglw:2", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(573) };
+static const struct builtin B2_vec_vmrglh = { { &T_vec_p16, &T_vec_p16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vmrglh:2", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(574) };
+static const struct builtin B3_vec_vmrglh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmrglh:3", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(575) };
+static const struct builtin B3_vec_vmrglw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmrglw:3", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(576) };
+static const struct builtin B2_vec_vmrglb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vmrglb:2", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(577) };
+static const struct builtin B4_vec_vmrglh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmrglh:4", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(578) };
+static const struct builtin B4_vec_vmrglw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmrglw:4", "*vmrglw", CODE_FOR_xfxx_perm, B_UID(579) };
+static const struct builtin B3_vec_vmrglb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vmrglb:3", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(580) };
+static const struct builtin B_vec_mfvscr = { { NULL, NULL, NULL, }, "", &T_volatile_vec_u16, 0, FALSE, FALSE, 0, "vec_mfvscr", "*mfvscr", CODE_FOR_vxf_fxu, B_UID(581) };
+static const struct builtin B1_vec_vminsh = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vminsh:1", "*vminsh", CODE_FOR_xfxx_simple, B_UID(582) };
+static const struct builtin B1_vec_vminuh = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vminuh:1", "*vminuh", CODE_FOR_xfxx_simple, B_UID(583) };
+static const struct builtin B1_vec_vminsw = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vminsw:1", "*vminsw", CODE_FOR_xfxx_simple, B_UID(584) };
+static const struct builtin B1_vec_vminuw = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vminuw:1", "*vminuw", CODE_FOR_xfxx_simple, B_UID(585) };
+static const struct builtin B1_vec_vminsb = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vminsb:1", "*vminsb", CODE_FOR_xfxx_simple, B_UID(586) };
+static const struct builtin B1_vec_vminub = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vminub:1", "*vminub", CODE_FOR_xfxx_simple, B_UID(587) };
+static const struct builtin B_vec_vminfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vminfp", "*vminfp", CODE_FOR_xfxx_simple, B_UID(588) };
+static const struct builtin B2_vec_vminsh = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vminsh:2", "*vminsh", CODE_FOR_xfxx_simple, B_UID(589) };
+static const struct builtin B3_vec_vminsh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vminsh:3", "*vminsh", CODE_FOR_xfxx_simple, B_UID(590) };
+static const struct builtin B2_vec_vminsw = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vminsw:2", "*vminsw", CODE_FOR_xfxx_simple, B_UID(591) };
+static const struct builtin B3_vec_vminsw = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vminsw:3", "*vminsw", CODE_FOR_xfxx_simple, B_UID(592) };
+static const struct builtin B2_vec_vminsb = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vminsb:2", "*vminsb", CODE_FOR_xfxx_simple, B_UID(593) };
+static const struct builtin B3_vec_vminsb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vminsb:3", "*vminsb", CODE_FOR_xfxx_simple, B_UID(594) };
+static const struct builtin B2_vec_vminuh = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vminuh:2", "*vminuh", CODE_FOR_xfxx_simple, B_UID(595) };
+static const struct builtin B3_vec_vminuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vminuh:3", "*vminuh", CODE_FOR_xfxx_simple, B_UID(596) };
+static const struct builtin B2_vec_vminuw = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vminuw:2", "*vminuw", CODE_FOR_xfxx_simple, B_UID(597) };
+static const struct builtin B3_vec_vminuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vminuw:3", "*vminuw", CODE_FOR_xfxx_simple, B_UID(598) };
+static const struct builtin B2_vec_vminub = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vminub:2", "*vminub", CODE_FOR_xfxx_simple, B_UID(599) };
+static const struct builtin B3_vec_vminub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vminub:3", "*vminub", CODE_FOR_xfxx_simple, B_UID(600) };
+static const struct builtin B1_vec_vmladduhm = { { &T_vec_s16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmladduhm:1", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(601) };
+static const struct builtin B2_vec_vmladduhm = { { &T_vec_s16, &T_vec_u16, &T_vec_u16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmladduhm:2", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(602) };
+static const struct builtin B3_vec_vmladduhm = { { &T_vec_u16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmladduhm:3", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(603) };
+static const struct builtin B4_vec_vmladduhm = { { &T_vec_u16, &T_vec_u16, &T_vec_u16, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vmladduhm:4", "*vmladduhm", CODE_FOR_xfxxx_complex, B_UID(604) };
+static const struct builtin B_vec_vmhraddshs = { { &T_vec_s16, &T_vec_s16, &T_vec_s16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vmhraddshs", "*vmhraddshs", CODE_FOR_xfxxx_complex, B_UID(605) };
+static const struct builtin B_vec_vmsumshm = { { &T_vec_s16, &T_vec_s16, &T_vec_s32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vmsumshm", "*vmsumshm", CODE_FOR_xfxxx_complex, B_UID(606) };
+static const struct builtin B_vec_vmsummbm = { { &T_vec_s8, &T_vec_u8, &T_vec_s32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vmsummbm", "*vmsummbm", CODE_FOR_xfxxx_complex, B_UID(607) };
+static const struct builtin B_vec_vmsumuhm = { { &T_vec_u16, &T_vec_u16, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vmsumuhm", "*vmsumuhm", CODE_FOR_xfxxx_complex, B_UID(608) };
+static const struct builtin B_vec_vmsumubm = { { &T_vec_u8, &T_vec_u8, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vmsumubm", "*vmsumubm", CODE_FOR_xfxxx_complex, B_UID(609) };
+static const struct builtin B_vec_vmsumshs = { { &T_vec_s16, &T_vec_s16, &T_vec_s32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vmsumshs", "*vmsumshs", CODE_FOR_xfxxx_complex, B_UID(610) };
+static const struct builtin B_vec_vmsumuhs = { { &T_vec_u16, &T_vec_u16, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vmsumuhs", "*vmsumuhs", CODE_FOR_xfxxx_complex, B_UID(611) };
+static const struct builtin B1_vec_mtvscr = { { &T_vec_b16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:1", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(612) };
+static const struct builtin B2_vec_mtvscr = { { &T_vec_b32, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:2", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(613) };
+static const struct builtin B3_vec_mtvscr = { { &T_vec_b8, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:3", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(614) };
+static const struct builtin B4_vec_mtvscr = { { &T_vec_p16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:4", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(615) };
+static const struct builtin B5_vec_mtvscr = { { &T_vec_s16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:5", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(616) };
+static const struct builtin B6_vec_mtvscr = { { &T_vec_s32, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:6", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(617) };
+static const struct builtin B7_vec_mtvscr = { { &T_vec_s8, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:7", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(618) };
+static const struct builtin B8_vec_mtvscr = { { &T_vec_u16, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:8", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(619) };
+static const struct builtin B9_vec_mtvscr = { { &T_vec_u32, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:9", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(620) };
+static const struct builtin B10_vec_mtvscr = { { &T_vec_u8, NULL, NULL, }, "x", &T_volatile_void, 1, FALSE, FALSE, 0, "vec_mtvscr:10", "*mtvscr", CODE_FOR_vfx_fxu, B_UID(621) };
+static const struct builtin B_vec_vmulesh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmulesh", "*vmulesh", CODE_FOR_xfxx_complex, B_UID(622) };
+static const struct builtin B_vec_vmulesb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmulesb", "*vmulesb", CODE_FOR_xfxx_complex, B_UID(623) };
+static const struct builtin B_vec_vmuleuh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmuleuh", "*vmuleuh", CODE_FOR_xfxx_complex, B_UID(624) };
+static const struct builtin B_vec_vmuleub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmuleub", "*vmuleub", CODE_FOR_xfxx_complex, B_UID(625) };
+static const struct builtin B_vec_vmulosh = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vmulosh", "*vmulosh", CODE_FOR_xfxx_complex, B_UID(626) };
+static const struct builtin B_vec_vmulosb = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vmulosb", "*vmulosb", CODE_FOR_xfxx_complex, B_UID(627) };
+static const struct builtin B_vec_vmulouh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vmulouh", "*vmulouh", CODE_FOR_xfxx_complex, B_UID(628) };
+static const struct builtin B_vec_vmuloub = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vmuloub", "*vmuloub", CODE_FOR_xfxx_complex, B_UID(629) };
+static const struct builtin B_vec_vnmsubfp = { { &T_vec_f32, &T_vec_f32, &T_vec_f32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vnmsubfp", "*vnmsubfp", CODE_FOR_xfxxx_fp, B_UID(630) };
+static const struct builtin B1_vec_vnor = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vnor:1", "*vnor", CODE_FOR_xfxx_simple, B_UID(631) };
+static const struct builtin B2_vec_vnor = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vnor:2", "*vnor", CODE_FOR_xfxx_simple, B_UID(632) };
+static const struct builtin B3_vec_vnor = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vnor:3", "*vnor", CODE_FOR_xfxx_simple, B_UID(633) };
+static const struct builtin B4_vec_vnor = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vnor:4", "*vnor", CODE_FOR_xfxx_simple, B_UID(634) };
+static const struct builtin B5_vec_vnor = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vnor:5", "*vnor", CODE_FOR_xfxx_simple, B_UID(635) };
+static const struct builtin B6_vec_vnor = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vnor:6", "*vnor", CODE_FOR_xfxx_simple, B_UID(636) };
+static const struct builtin B7_vec_vnor = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vnor:7", "*vnor", CODE_FOR_xfxx_simple, B_UID(637) };
+static const struct builtin B8_vec_vnor = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vnor:8", "*vnor", CODE_FOR_xfxx_simple, B_UID(638) };
+static const struct builtin B9_vec_vnor = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vnor:9", "*vnor", CODE_FOR_xfxx_simple, B_UID(639) };
+static const struct builtin B10_vec_vnor = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vnor:10", "*vnor", CODE_FOR_xfxx_simple, B_UID(640) };
+static const struct builtin B1_vec_vor = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 2, "vec_vor:1", "*vor", CODE_FOR_xfxx_simple, B_UID(641) };
+static const struct builtin B2_vec_vor = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vor:2", "*vor", CODE_FOR_xfxx_simple, B_UID(642) };
+static const struct builtin B3_vec_vor = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vor:3", "*vor", CODE_FOR_xfxx_simple, B_UID(643) };
+static const struct builtin B4_vec_vor = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 2, "vec_vor:4", "*vor", CODE_FOR_xfxx_simple, B_UID(644) };
+static const struct builtin B5_vec_vor = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vor:5", "*vor", CODE_FOR_xfxx_simple, B_UID(645) };
+static const struct builtin B6_vec_vor = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vor:6", "*vor", CODE_FOR_xfxx_simple, B_UID(646) };
+static const struct builtin B7_vec_vor = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vor:7", "*vor", CODE_FOR_xfxx_simple, B_UID(647) };
+static const struct builtin B8_vec_vor = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 2, "vec_vor:8", "*vor", CODE_FOR_xfxx_simple, B_UID(648) };
+static const struct builtin B9_vec_vor = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vor:9", "*vor", CODE_FOR_xfxx_simple, B_UID(649) };
+static const struct builtin B10_vec_vor = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vor:10", "*vor", CODE_FOR_xfxx_simple, B_UID(650) };
+static const struct builtin B11_vec_vor = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vor:11", "*vor", CODE_FOR_xfxx_simple, B_UID(651) };
+static const struct builtin B12_vec_vor = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 2, "vec_vor:12", "*vor", CODE_FOR_xfxx_simple, B_UID(652) };
+static const struct builtin B13_vec_vor = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vor:13", "*vor", CODE_FOR_xfxx_simple, B_UID(653) };
+static const struct builtin B14_vec_vor = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 2, "vec_vor:14", "*vor", CODE_FOR_xfxx_simple, B_UID(654) };
+static const struct builtin B15_vec_vor = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vor:15", "*vor", CODE_FOR_xfxx_simple, B_UID(655) };
+static const struct builtin B16_vec_vor = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 2, "vec_vor:16", "*vor", CODE_FOR_xfxx_simple, B_UID(656) };
+static const struct builtin B17_vec_vor = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vor:17", "*vor", CODE_FOR_xfxx_simple, B_UID(657) };
+static const struct builtin B18_vec_vor = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 2, "vec_vor:18", "*vor", CODE_FOR_xfxx_simple, B_UID(658) };
+static const struct builtin B19_vec_vor = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vor:19", "*vor", CODE_FOR_xfxx_simple, B_UID(659) };
+static const struct builtin B20_vec_vor = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 2, "vec_vor:20", "*vor", CODE_FOR_xfxx_simple, B_UID(660) };
+static const struct builtin B21_vec_vor = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vor:21", "*vor", CODE_FOR_xfxx_simple, B_UID(661) };
+static const struct builtin B22_vec_vor = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 2, "vec_vor:22", "*vor", CODE_FOR_xfxx_simple, B_UID(662) };
+static const struct builtin B23_vec_vor = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vor:23", "*vor", CODE_FOR_xfxx_simple, B_UID(663) };
+static const struct builtin B24_vec_vor = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 2, "vec_vor:24", "*vor", CODE_FOR_xfxx_simple, B_UID(664) };
+static const struct builtin B1_vec_vpkuhum = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vpkuhum:1", "*vpkuhum", CODE_FOR_xfxx_perm, B_UID(665) };
+static const struct builtin B1_vec_vpkuwum = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vpkuwum:1", "*vpkuwum", CODE_FOR_xfxx_perm, B_UID(666) };
+static const struct builtin B2_vec_vpkuhum = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vpkuhum:2", "*vpkuhum", CODE_FOR_xfxx_perm, B_UID(667) };
+static const struct builtin B2_vec_vpkuwum = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vpkuwum:2", "*vpkuwum", CODE_FOR_xfxx_perm, B_UID(668) };
+static const struct builtin B3_vec_vpkuhum = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vpkuhum:3", "*vpkuhum", CODE_FOR_xfxx_perm, B_UID(669) };
+static const struct builtin B3_vec_vpkuwum = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vpkuwum:3", "*vpkuwum", CODE_FOR_xfxx_perm, B_UID(670) };
+static const struct builtin B_vec_vpkpx = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vpkpx", "*vpkpx", CODE_FOR_xfxx_perm, B_UID(671) };
+static const struct builtin B_vec_vpkshss = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vpkshss", "*vpkshss", CODE_FOR_xfxx_perm, B_UID(672) };
+static const struct builtin B_vec_vpkswss = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vpkswss", "*vpkswss", CODE_FOR_xfxx_perm, B_UID(673) };
+static const struct builtin B_vec_vpkuhus = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vpkuhus", "*vpkuhus", CODE_FOR_xfxx_perm, B_UID(674) };
+static const struct builtin B_vec_vpkuwus = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vpkuwus", "*vpkuwus", CODE_FOR_xfxx_perm, B_UID(675) };
+static const struct builtin B_vec_vpkshus = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vpkshus", "*vpkshus", CODE_FOR_xfxx_perm, B_UID(676) };
+static const struct builtin B_vec_vpkswus = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vpkswus", "*vpkswus", CODE_FOR_xfxx_perm, B_UID(677) };
+static const struct builtin B1_vec_vperm = { { &T_vec_b16, &T_vec_b16, &T_vec_u8, }, "xxx", &T_vec_b16, 3, FALSE, FALSE, 0, "vec_vperm:1", "*vperm", CODE_FOR_xfxxx_perm, B_UID(678) };
+static const struct builtin B2_vec_vperm = { { &T_vec_b32, &T_vec_b32, &T_vec_u8, }, "xxx", &T_vec_b32, 3, FALSE, FALSE, 0, "vec_vperm:2", "*vperm", CODE_FOR_xfxxx_perm, B_UID(679) };
+static const struct builtin B3_vec_vperm = { { &T_vec_b8, &T_vec_b8, &T_vec_u8, }, "xxx", &T_vec_b8, 3, FALSE, FALSE, 0, "vec_vperm:3", "*vperm", CODE_FOR_xfxxx_perm, B_UID(680) };
+static const struct builtin B4_vec_vperm = { { &T_vec_f32, &T_vec_f32, &T_vec_u8, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vperm:4", "*vperm", CODE_FOR_xfxxx_perm, B_UID(681) };
+static const struct builtin B5_vec_vperm = { { &T_vec_p16, &T_vec_p16, &T_vec_u8, }, "xxx", &T_vec_p16, 3, FALSE, FALSE, 0, "vec_vperm:5", "*vperm", CODE_FOR_xfxxx_perm, B_UID(682) };
+static const struct builtin B6_vec_vperm = { { &T_vec_s16, &T_vec_s16, &T_vec_u8, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vperm:6", "*vperm", CODE_FOR_xfxxx_perm, B_UID(683) };
+static const struct builtin B7_vec_vperm = { { &T_vec_s32, &T_vec_s32, &T_vec_u8, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vperm:7", "*vperm", CODE_FOR_xfxxx_perm, B_UID(684) };
+static const struct builtin B8_vec_vperm = { { &T_vec_s8, &T_vec_s8, &T_vec_u8, }, "xxx", &T_vec_s8, 3, FALSE, FALSE, 0, "vec_vperm:8", "*vperm", CODE_FOR_xfxxx_perm, B_UID(685) };
+static const struct builtin B9_vec_vperm = { { &T_vec_u16, &T_vec_u16, &T_vec_u8, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vperm:9", "*vperm", CODE_FOR_xfxxx_perm, B_UID(686) };
+static const struct builtin B10_vec_vperm = { { &T_vec_u32, &T_vec_u32, &T_vec_u8, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vperm:10", "*vperm", CODE_FOR_xfxxx_perm, B_UID(687) };
+static const struct builtin B11_vec_vperm = { { &T_vec_u8, &T_vec_u8, &T_vec_u8, }, "xxx", &T_vec_u8, 3, FALSE, FALSE, 0, "vec_vperm:11", "*vperm", CODE_FOR_xfxxx_perm, B_UID(688) };
+static const struct builtin B_vec_vrefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrefp", "*vrefp", CODE_FOR_xfx_fp, B_UID(689) };
+static const struct builtin B1_vec_vrlh = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vrlh:1", "*vrlh", CODE_FOR_xfxx_simple, B_UID(690) };
+static const struct builtin B1_vec_vrlw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vrlw:1", "*vrlw", CODE_FOR_xfxx_simple, B_UID(691) };
+static const struct builtin B1_vec_vrlb = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vrlb:1", "*vrlb", CODE_FOR_xfxx_simple, B_UID(692) };
+static const struct builtin B2_vec_vrlh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vrlh:2", "*vrlh", CODE_FOR_xfxx_simple, B_UID(693) };
+static const struct builtin B2_vec_vrlw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vrlw:2", "*vrlw", CODE_FOR_xfxx_simple, B_UID(694) };
+static const struct builtin B2_vec_vrlb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vrlb:2", "*vrlb", CODE_FOR_xfxx_simple, B_UID(695) };
+static const struct builtin B_vec_vrfin = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfin", "*vrfin", CODE_FOR_xfx_fp, B_UID(696) };
+static const struct builtin B_vec_vrsqrtefp = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrsqrtefp", "*vrsqrtefp", CODE_FOR_xfx_fp, B_UID(697) };
+static const struct builtin B1_vec_vsel = { { &T_vec_b16, &T_vec_b16, &T_vec_b16, }, "xxx", &T_vec_b16, 3, FALSE, FALSE, 0, "vec_vsel:1", "*vsel", CODE_FOR_xfxxx_simple, B_UID(698) };
+static const struct builtin B2_vec_vsel = { { &T_vec_b16, &T_vec_b16, &T_vec_u16, }, "xxx", &T_vec_b16, 3, FALSE, FALSE, 0, "vec_vsel:2", "*vsel", CODE_FOR_xfxxx_simple, B_UID(699) };
+static const struct builtin B3_vec_vsel = { { &T_vec_b32, &T_vec_b32, &T_vec_b32, }, "xxx", &T_vec_b32, 3, FALSE, FALSE, 0, "vec_vsel:3", "*vsel", CODE_FOR_xfxxx_simple, B_UID(700) };
+static const struct builtin B4_vec_vsel = { { &T_vec_b32, &T_vec_b32, &T_vec_u32, }, "xxx", &T_vec_b32, 3, FALSE, FALSE, 0, "vec_vsel:4", "*vsel", CODE_FOR_xfxxx_simple, B_UID(701) };
+static const struct builtin B5_vec_vsel = { { &T_vec_b8, &T_vec_b8, &T_vec_b8, }, "xxx", &T_vec_b8, 3, FALSE, FALSE, 0, "vec_vsel:5", "*vsel", CODE_FOR_xfxxx_simple, B_UID(702) };
+static const struct builtin B6_vec_vsel = { { &T_vec_b8, &T_vec_b8, &T_vec_u8, }, "xxx", &T_vec_b8, 3, FALSE, FALSE, 0, "vec_vsel:6", "*vsel", CODE_FOR_xfxxx_simple, B_UID(703) };
+static const struct builtin B7_vec_vsel = { { &T_vec_f32, &T_vec_f32, &T_vec_b32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vsel:7", "*vsel", CODE_FOR_xfxxx_simple, B_UID(704) };
+static const struct builtin B8_vec_vsel = { { &T_vec_f32, &T_vec_f32, &T_vec_u32, }, "xxx", &T_vec_f32, 3, FALSE, FALSE, 0, "vec_vsel:8", "*vsel", CODE_FOR_xfxxx_simple, B_UID(705) };
+static const struct builtin B9_vec_vsel = { { &T_vec_s16, &T_vec_s16, &T_vec_b16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vsel:9", "*vsel", CODE_FOR_xfxxx_simple, B_UID(706) };
+static const struct builtin B10_vec_vsel = { { &T_vec_s16, &T_vec_s16, &T_vec_u16, }, "xxx", &T_vec_s16, 3, FALSE, FALSE, 0, "vec_vsel:10", "*vsel", CODE_FOR_xfxxx_simple, B_UID(707) };
+static const struct builtin B11_vec_vsel = { { &T_vec_s32, &T_vec_s32, &T_vec_b32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vsel:11", "*vsel", CODE_FOR_xfxxx_simple, B_UID(708) };
+static const struct builtin B12_vec_vsel = { { &T_vec_s32, &T_vec_s32, &T_vec_u32, }, "xxx", &T_vec_s32, 3, FALSE, FALSE, 0, "vec_vsel:12", "*vsel", CODE_FOR_xfxxx_simple, B_UID(709) };
+static const struct builtin B13_vec_vsel = { { &T_vec_s8, &T_vec_s8, &T_vec_b8, }, "xxx", &T_vec_s8, 3, FALSE, FALSE, 0, "vec_vsel:13", "*vsel", CODE_FOR_xfxxx_simple, B_UID(710) };
+static const struct builtin B14_vec_vsel = { { &T_vec_s8, &T_vec_s8, &T_vec_u8, }, "xxx", &T_vec_s8, 3, FALSE, FALSE, 0, "vec_vsel:14", "*vsel", CODE_FOR_xfxxx_simple, B_UID(711) };
+static const struct builtin B15_vec_vsel = { { &T_vec_u16, &T_vec_u16, &T_vec_b16, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vsel:15", "*vsel", CODE_FOR_xfxxx_simple, B_UID(712) };
+static const struct builtin B16_vec_vsel = { { &T_vec_u16, &T_vec_u16, &T_vec_u16, }, "xxx", &T_vec_u16, 3, FALSE, FALSE, 0, "vec_vsel:16", "*vsel", CODE_FOR_xfxxx_simple, B_UID(713) };
+static const struct builtin B17_vec_vsel = { { &T_vec_u32, &T_vec_u32, &T_vec_b32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vsel:17", "*vsel", CODE_FOR_xfxxx_simple, B_UID(714) };
+static const struct builtin B18_vec_vsel = { { &T_vec_u32, &T_vec_u32, &T_vec_u32, }, "xxx", &T_vec_u32, 3, FALSE, FALSE, 0, "vec_vsel:18", "*vsel", CODE_FOR_xfxxx_simple, B_UID(715) };
+static const struct builtin B19_vec_vsel = { { &T_vec_u8, &T_vec_u8, &T_vec_b8, }, "xxx", &T_vec_u8, 3, FALSE, FALSE, 0, "vec_vsel:19", "*vsel", CODE_FOR_xfxxx_simple, B_UID(716) };
+static const struct builtin B20_vec_vsel = { { &T_vec_u8, &T_vec_u8, &T_vec_u8, }, "xxx", &T_vec_u8, 3, FALSE, FALSE, 0, "vec_vsel:20", "*vsel", CODE_FOR_xfxxx_simple, B_UID(717) };
+static const struct builtin B1_vec_vslh = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vslh:1", "*vslh", CODE_FOR_xfxx_simple, B_UID(718) };
+static const struct builtin B1_vec_vslw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vslw:1", "*vslw", CODE_FOR_xfxx_simple, B_UID(719) };
+static const struct builtin B1_vec_vslb = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vslb:1", "*vslb", CODE_FOR_xfxx_simple, B_UID(720) };
+static const struct builtin B2_vec_vslh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vslh:2", "*vslh", CODE_FOR_xfxx_simple, B_UID(721) };
+static const struct builtin B2_vec_vslw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vslw:2", "*vslw", CODE_FOR_xfxx_simple, B_UID(722) };
+static const struct builtin B2_vec_vslb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vslb:2", "*vslb", CODE_FOR_xfxx_simple, B_UID(723) };
+static const struct builtin B1_vec_vsldoi = { { &T_vec_b16, &T_vec_b16, &T_immed_u4, }, "xxC", &T_vec_b16, 3, FALSE, FALSE, 3, "vec_vsldoi:1", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(724) };
+static const struct builtin B2_vec_vsldoi = { { &T_vec_b32, &T_vec_b32, &T_immed_u4, }, "xxC", &T_vec_b32, 3, FALSE, FALSE, 3, "vec_vsldoi:2", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(725) };
+static const struct builtin B3_vec_vsldoi = { { &T_vec_b8, &T_vec_b8, &T_immed_u4, }, "xxC", &T_vec_b8, 3, FALSE, FALSE, 3, "vec_vsldoi:3", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(726) };
+static const struct builtin B4_vec_vsldoi = { { &T_vec_f32, &T_vec_f32, &T_immed_u4, }, "xxC", &T_vec_f32, 3, FALSE, FALSE, 3, "vec_vsldoi:4", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(727) };
+static const struct builtin B5_vec_vsldoi = { { &T_vec_p16, &T_vec_p16, &T_immed_u4, }, "xxC", &T_vec_p16, 3, FALSE, FALSE, 3, "vec_vsldoi:5", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(728) };
+static const struct builtin B6_vec_vsldoi = { { &T_vec_s16, &T_vec_s16, &T_immed_u4, }, "xxC", &T_vec_s16, 3, FALSE, FALSE, 3, "vec_vsldoi:6", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(729) };
+static const struct builtin B7_vec_vsldoi = { { &T_vec_s32, &T_vec_s32, &T_immed_u4, }, "xxC", &T_vec_s32, 3, FALSE, FALSE, 3, "vec_vsldoi:7", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(730) };
+static const struct builtin B8_vec_vsldoi = { { &T_vec_s8, &T_vec_s8, &T_immed_u4, }, "xxC", &T_vec_s8, 3, FALSE, FALSE, 3, "vec_vsldoi:8", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(731) };
+static const struct builtin B9_vec_vsldoi = { { &T_vec_u16, &T_vec_u16, &T_immed_u4, }, "xxC", &T_vec_u16, 3, FALSE, FALSE, 3, "vec_vsldoi:9", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(732) };
+static const struct builtin B10_vec_vsldoi = { { &T_vec_u32, &T_vec_u32, &T_immed_u4, }, "xxC", &T_vec_u32, 3, FALSE, FALSE, 3, "vec_vsldoi:10", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(733) };
+static const struct builtin B11_vec_vsldoi = { { &T_vec_u8, &T_vec_u8, &T_immed_u4, }, "xxC", &T_vec_u8, 3, FALSE, FALSE, 3, "vec_vsldoi:11", "*vsldoi", CODE_FOR_xfxxC_perm, B_UID(734) };
+static const struct builtin B1_vec_vsl = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsl:1", "*vsl", CODE_FOR_xfxx_simple, B_UID(735) };
+static const struct builtin B2_vec_vsl = { { &T_vec_b16, &T_vec_u32, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsl:2", "*vsl", CODE_FOR_xfxx_simple, B_UID(736) };
+static const struct builtin B3_vec_vsl = { { &T_vec_b16, &T_vec_u8, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsl:3", "*vsl", CODE_FOR_xfxx_simple, B_UID(737) };
+static const struct builtin B4_vec_vsl = { { &T_vec_b32, &T_vec_u16, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsl:4", "*vsl", CODE_FOR_xfxx_simple, B_UID(738) };
+static const struct builtin B5_vec_vsl = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsl:5", "*vsl", CODE_FOR_xfxx_simple, B_UID(739) };
+static const struct builtin B6_vec_vsl = { { &T_vec_b32, &T_vec_u8, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsl:6", "*vsl", CODE_FOR_xfxx_simple, B_UID(740) };
+static const struct builtin B7_vec_vsl = { { &T_vec_b8, &T_vec_u16, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsl:7", "*vsl", CODE_FOR_xfxx_simple, B_UID(741) };
+static const struct builtin B8_vec_vsl = { { &T_vec_b8, &T_vec_u32, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsl:8", "*vsl", CODE_FOR_xfxx_simple, B_UID(742) };
+static const struct builtin B9_vec_vsl = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsl:9", "*vsl", CODE_FOR_xfxx_simple, B_UID(743) };
+static const struct builtin B10_vec_vsl = { { &T_vec_p16, &T_vec_u16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsl:10", "*vsl", CODE_FOR_xfxx_simple, B_UID(744) };
+static const struct builtin B11_vec_vsl = { { &T_vec_p16, &T_vec_u32, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsl:11", "*vsl", CODE_FOR_xfxx_simple, B_UID(745) };
+static const struct builtin B12_vec_vsl = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsl:12", "*vsl", CODE_FOR_xfxx_simple, B_UID(746) };
+static const struct builtin B13_vec_vsl = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsl:13", "*vsl", CODE_FOR_xfxx_simple, B_UID(747) };
+static const struct builtin B14_vec_vsl = { { &T_vec_s16, &T_vec_u32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsl:14", "*vsl", CODE_FOR_xfxx_simple, B_UID(748) };
+static const struct builtin B15_vec_vsl = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsl:15", "*vsl", CODE_FOR_xfxx_simple, B_UID(749) };
+static const struct builtin B16_vec_vsl = { { &T_vec_s32, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsl:16", "*vsl", CODE_FOR_xfxx_simple, B_UID(750) };
+static const struct builtin B17_vec_vsl = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsl:17", "*vsl", CODE_FOR_xfxx_simple, B_UID(751) };
+static const struct builtin B18_vec_vsl = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsl:18", "*vsl", CODE_FOR_xfxx_simple, B_UID(752) };
+static const struct builtin B19_vec_vsl = { { &T_vec_s8, &T_vec_u16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsl:19", "*vsl", CODE_FOR_xfxx_simple, B_UID(753) };
+static const struct builtin B20_vec_vsl = { { &T_vec_s8, &T_vec_u32, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsl:20", "*vsl", CODE_FOR_xfxx_simple, B_UID(754) };
+static const struct builtin B21_vec_vsl = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsl:21", "*vsl", CODE_FOR_xfxx_simple, B_UID(755) };
+static const struct builtin B22_vec_vsl = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsl:22", "*vsl", CODE_FOR_xfxx_simple, B_UID(756) };
+static const struct builtin B23_vec_vsl = { { &T_vec_u16, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsl:23", "*vsl", CODE_FOR_xfxx_simple, B_UID(757) };
+static const struct builtin B24_vec_vsl = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsl:24", "*vsl", CODE_FOR_xfxx_simple, B_UID(758) };
+static const struct builtin B25_vec_vsl = { { &T_vec_u32, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsl:25", "*vsl", CODE_FOR_xfxx_simple, B_UID(759) };
+static const struct builtin B26_vec_vsl = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsl:26", "*vsl", CODE_FOR_xfxx_simple, B_UID(760) };
+static const struct builtin B27_vec_vsl = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsl:27", "*vsl", CODE_FOR_xfxx_simple, B_UID(761) };
+static const struct builtin B28_vec_vsl = { { &T_vec_u8, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsl:28", "*vsl", CODE_FOR_xfxx_simple, B_UID(762) };
+static const struct builtin B29_vec_vsl = { { &T_vec_u8, &T_vec_u32, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsl:29", "*vsl", CODE_FOR_xfxx_simple, B_UID(763) };
+static const struct builtin B30_vec_vsl = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsl:30", "*vsl", CODE_FOR_xfxx_simple, B_UID(764) };
+static const struct builtin B1_vec_vslo = { { &T_vec_f32, &T_vec_s8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vslo:1", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(765) };
+static const struct builtin B2_vec_vslo = { { &T_vec_f32, &T_vec_u8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vslo:2", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(766) };
+static const struct builtin B3_vec_vslo = { { &T_vec_p16, &T_vec_s8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vslo:3", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(767) };
+static const struct builtin B4_vec_vslo = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vslo:4", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(768) };
+static const struct builtin B5_vec_vslo = { { &T_vec_s16, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vslo:5", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(769) };
+static const struct builtin B6_vec_vslo = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vslo:6", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(770) };
+static const struct builtin B7_vec_vslo = { { &T_vec_s32, &T_vec_s8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vslo:7", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(771) };
+static const struct builtin B8_vec_vslo = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vslo:8", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(772) };
+static const struct builtin B9_vec_vslo = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vslo:9", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(773) };
+static const struct builtin B10_vec_vslo = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vslo:10", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(774) };
+static const struct builtin B11_vec_vslo = { { &T_vec_u16, &T_vec_s8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vslo:11", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(775) };
+static const struct builtin B12_vec_vslo = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vslo:12", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(776) };
+static const struct builtin B13_vec_vslo = { { &T_vec_u32, &T_vec_s8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vslo:13", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(777) };
+static const struct builtin B14_vec_vslo = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vslo:14", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(778) };
+static const struct builtin B15_vec_vslo = { { &T_vec_u8, &T_vec_s8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vslo:15", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(779) };
+static const struct builtin B16_vec_vslo = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vslo:16", "*vslo", CODE_FOR_xfxx_perm_bug, B_UID(780) };
+static const struct builtin B1_vec_vsplth = { { &T_vec_b16, &T_immed_u5, NULL, }, "xB", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsplth:1", "*vsplth", CODE_FOR_xfxB_perm, B_UID(781) };
+static const struct builtin B1_vec_vspltw = { { &T_vec_b32, &T_immed_u5, NULL, }, "xB", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vspltw:1", "*vspltw", CODE_FOR_xfxB_perm, B_UID(782) };
+static const struct builtin B1_vec_vspltb = { { &T_vec_b8, &T_immed_u5, NULL, }, "xB", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vspltb:1", "*vspltb", CODE_FOR_xfxB_perm, B_UID(783) };
+static const struct builtin B2_vec_vspltw = { { &T_vec_f32, &T_immed_u5, NULL, }, "xB", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vspltw:2", "*vspltw", CODE_FOR_xfxB_perm, B_UID(784) };
+static const struct builtin B2_vec_vsplth = { { &T_vec_p16, &T_immed_u5, NULL, }, "xB", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsplth:2", "*vsplth", CODE_FOR_xfxB_perm, B_UID(785) };
+static const struct builtin B3_vec_vsplth = { { &T_vec_s16, &T_immed_u5, NULL, }, "xB", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsplth:3", "*vsplth", CODE_FOR_xfxB_perm, B_UID(786) };
+static const struct builtin B3_vec_vspltw = { { &T_vec_s32, &T_immed_u5, NULL, }, "xB", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vspltw:3", "*vspltw", CODE_FOR_xfxB_perm, B_UID(787) };
+static const struct builtin B2_vec_vspltb = { { &T_vec_s8, &T_immed_u5, NULL, }, "xB", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vspltb:2", "*vspltb", CODE_FOR_xfxB_perm, B_UID(788) };
+static const struct builtin B4_vec_vsplth = { { &T_vec_u16, &T_immed_u5, NULL, }, "xB", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsplth:4", "*vsplth", CODE_FOR_xfxB_perm, B_UID(789) };
+static const struct builtin B4_vec_vspltw = { { &T_vec_u32, &T_immed_u5, NULL, }, "xB", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vspltw:4", "*vspltw", CODE_FOR_xfxB_perm, B_UID(790) };
+static const struct builtin B3_vec_vspltb = { { &T_vec_u8, &T_immed_u5, NULL, }, "xB", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vspltb:3", "*vspltb", CODE_FOR_xfxB_perm, B_UID(791) };
+static const struct builtin B_vec_vspltish = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_s16, 1, FALSE, FALSE, 5, "vec_vspltish", "*vspltish", CODE_FOR_xfA_perm, B_UID(792) };
+static const struct builtin B_vec_vspltisw = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_s32, 1, FALSE, FALSE, 6, "vec_vspltisw", "*vspltisw", CODE_FOR_xfA_perm, B_UID(793) };
+static const struct builtin B_vec_vspltisb = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_s8, 1, FALSE, FALSE, 4, "vec_vspltisb", "*vspltisb", CODE_FOR_xfA_perm, B_UID(794) };
+static const struct builtin B_vec_splat_u16 = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_u16, 1, FALSE, FALSE, 5, "vec_splat_u16", "*vspltish", CODE_FOR_xfA_perm, B_UID(795) };
+static const struct builtin B_vec_splat_u32 = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_u32, 1, FALSE, FALSE, 6, "vec_splat_u32", "*vspltisw", CODE_FOR_xfA_perm, B_UID(796) };
+static const struct builtin B_vec_splat_u8 = { { &T_immed_s5, NULL, NULL, }, "A", &T_vec_u8, 1, FALSE, FALSE, 4, "vec_splat_u8", "*vspltisb", CODE_FOR_xfA_perm, B_UID(797) };
+static const struct builtin B1_vec_vsrh = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsrh:1", "*vsrh", CODE_FOR_xfxx_simple, B_UID(798) };
+static const struct builtin B1_vec_vsrw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsrw:1", "*vsrw", CODE_FOR_xfxx_simple, B_UID(799) };
+static const struct builtin B1_vec_vsrb = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsrb:1", "*vsrb", CODE_FOR_xfxx_simple, B_UID(800) };
+static const struct builtin B2_vec_vsrh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsrh:2", "*vsrh", CODE_FOR_xfxx_simple, B_UID(801) };
+static const struct builtin B2_vec_vsrw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsrw:2", "*vsrw", CODE_FOR_xfxx_simple, B_UID(802) };
+static const struct builtin B2_vec_vsrb = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsrb:2", "*vsrb", CODE_FOR_xfxx_simple, B_UID(803) };
+static const struct builtin B1_vec_vsrah = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsrah:1", "*vsrah", CODE_FOR_xfxx_simple, B_UID(804) };
+static const struct builtin B1_vec_vsraw = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsraw:1", "*vsraw", CODE_FOR_xfxx_simple, B_UID(805) };
+static const struct builtin B1_vec_vsrab = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsrab:1", "*vsrab", CODE_FOR_xfxx_simple, B_UID(806) };
+static const struct builtin B2_vec_vsrah = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsrah:2", "*vsrah", CODE_FOR_xfxx_simple, B_UID(807) };
+static const struct builtin B2_vec_vsraw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsraw:2", "*vsraw", CODE_FOR_xfxx_simple, B_UID(808) };
+static const struct builtin B2_vec_vsrab = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsrab:2", "*vsrab", CODE_FOR_xfxx_simple, B_UID(809) };
+static const struct builtin B1_vec_vsr = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsr:1", "*vsr", CODE_FOR_xfxx_simple, B_UID(810) };
+static const struct builtin B2_vec_vsr = { { &T_vec_b16, &T_vec_u32, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsr:2", "*vsr", CODE_FOR_xfxx_simple, B_UID(811) };
+static const struct builtin B3_vec_vsr = { { &T_vec_b16, &T_vec_u8, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 0, "vec_vsr:3", "*vsr", CODE_FOR_xfxx_simple, B_UID(812) };
+static const struct builtin B4_vec_vsr = { { &T_vec_b32, &T_vec_u16, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsr:4", "*vsr", CODE_FOR_xfxx_simple, B_UID(813) };
+static const struct builtin B5_vec_vsr = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsr:5", "*vsr", CODE_FOR_xfxx_simple, B_UID(814) };
+static const struct builtin B6_vec_vsr = { { &T_vec_b32, &T_vec_u8, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 0, "vec_vsr:6", "*vsr", CODE_FOR_xfxx_simple, B_UID(815) };
+static const struct builtin B7_vec_vsr = { { &T_vec_b8, &T_vec_u16, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsr:7", "*vsr", CODE_FOR_xfxx_simple, B_UID(816) };
+static const struct builtin B8_vec_vsr = { { &T_vec_b8, &T_vec_u32, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsr:8", "*vsr", CODE_FOR_xfxx_simple, B_UID(817) };
+static const struct builtin B9_vec_vsr = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 0, "vec_vsr:9", "*vsr", CODE_FOR_xfxx_simple, B_UID(818) };
+static const struct builtin B10_vec_vsr = { { &T_vec_p16, &T_vec_u16, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsr:10", "*vsr", CODE_FOR_xfxx_simple, B_UID(819) };
+static const struct builtin B11_vec_vsr = { { &T_vec_p16, &T_vec_u32, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsr:11", "*vsr", CODE_FOR_xfxx_simple, B_UID(820) };
+static const struct builtin B12_vec_vsr = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsr:12", "*vsr", CODE_FOR_xfxx_simple, B_UID(821) };
+static const struct builtin B13_vec_vsr = { { &T_vec_s16, &T_vec_u16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsr:13", "*vsr", CODE_FOR_xfxx_simple, B_UID(822) };
+static const struct builtin B14_vec_vsr = { { &T_vec_s16, &T_vec_u32, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsr:14", "*vsr", CODE_FOR_xfxx_simple, B_UID(823) };
+static const struct builtin B15_vec_vsr = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsr:15", "*vsr", CODE_FOR_xfxx_simple, B_UID(824) };
+static const struct builtin B16_vec_vsr = { { &T_vec_s32, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsr:16", "*vsr", CODE_FOR_xfxx_simple, B_UID(825) };
+static const struct builtin B17_vec_vsr = { { &T_vec_s32, &T_vec_u32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsr:17", "*vsr", CODE_FOR_xfxx_simple, B_UID(826) };
+static const struct builtin B18_vec_vsr = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsr:18", "*vsr", CODE_FOR_xfxx_simple, B_UID(827) };
+static const struct builtin B19_vec_vsr = { { &T_vec_s8, &T_vec_u16, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsr:19", "*vsr", CODE_FOR_xfxx_simple, B_UID(828) };
+static const struct builtin B20_vec_vsr = { { &T_vec_s8, &T_vec_u32, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsr:20", "*vsr", CODE_FOR_xfxx_simple, B_UID(829) };
+static const struct builtin B21_vec_vsr = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsr:21", "*vsr", CODE_FOR_xfxx_simple, B_UID(830) };
+static const struct builtin B22_vec_vsr = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsr:22", "*vsr", CODE_FOR_xfxx_simple, B_UID(831) };
+static const struct builtin B23_vec_vsr = { { &T_vec_u16, &T_vec_u32, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsr:23", "*vsr", CODE_FOR_xfxx_simple, B_UID(832) };
+static const struct builtin B24_vec_vsr = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsr:24", "*vsr", CODE_FOR_xfxx_simple, B_UID(833) };
+static const struct builtin B25_vec_vsr = { { &T_vec_u32, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsr:25", "*vsr", CODE_FOR_xfxx_simple, B_UID(834) };
+static const struct builtin B26_vec_vsr = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsr:26", "*vsr", CODE_FOR_xfxx_simple, B_UID(835) };
+static const struct builtin B27_vec_vsr = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsr:27", "*vsr", CODE_FOR_xfxx_simple, B_UID(836) };
+static const struct builtin B28_vec_vsr = { { &T_vec_u8, &T_vec_u16, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsr:28", "*vsr", CODE_FOR_xfxx_simple, B_UID(837) };
+static const struct builtin B29_vec_vsr = { { &T_vec_u8, &T_vec_u32, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsr:29", "*vsr", CODE_FOR_xfxx_simple, B_UID(838) };
+static const struct builtin B30_vec_vsr = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsr:30", "*vsr", CODE_FOR_xfxx_simple, B_UID(839) };
+static const struct builtin B1_vec_vsro = { { &T_vec_f32, &T_vec_s8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vsro:1", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(840) };
+static const struct builtin B2_vec_vsro = { { &T_vec_f32, &T_vec_u8, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 0, "vec_vsro:2", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(841) };
+static const struct builtin B3_vec_vsro = { { &T_vec_p16, &T_vec_s8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsro:3", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(842) };
+static const struct builtin B4_vec_vsro = { { &T_vec_p16, &T_vec_u8, NULL, }, "xx", &T_vec_p16, 2, FALSE, FALSE, 0, "vec_vsro:4", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(843) };
+static const struct builtin B5_vec_vsro = { { &T_vec_s16, &T_vec_s8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsro:5", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(844) };
+static const struct builtin B6_vec_vsro = { { &T_vec_s16, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_vsro:6", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(845) };
+static const struct builtin B7_vec_vsro = { { &T_vec_s32, &T_vec_s8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsro:7", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(846) };
+static const struct builtin B8_vec_vsro = { { &T_vec_s32, &T_vec_u8, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsro:8", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(847) };
+static const struct builtin B9_vec_vsro = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsro:9", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(848) };
+static const struct builtin B10_vec_vsro = { { &T_vec_s8, &T_vec_u8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 0, "vec_vsro:10", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(849) };
+static const struct builtin B11_vec_vsro = { { &T_vec_u16, &T_vec_s8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsro:11", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(850) };
+static const struct builtin B12_vec_vsro = { { &T_vec_u16, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_vsro:12", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(851) };
+static const struct builtin B13_vec_vsro = { { &T_vec_u32, &T_vec_s8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsro:13", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(852) };
+static const struct builtin B14_vec_vsro = { { &T_vec_u32, &T_vec_u8, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsro:14", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(853) };
+static const struct builtin B15_vec_vsro = { { &T_vec_u8, &T_vec_s8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsro:15", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(854) };
+static const struct builtin B16_vec_vsro = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 0, "vec_vsro:16", "*vsro", CODE_FOR_xfxx_perm_bug, B_UID(855) };
+static const struct builtin B1_vec_stvx = { { &T_vec_b16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:1", "*stvx", CODE_FOR_sfxii_store, B_UID(856) };
+static const struct builtin B2_vec_stvx = { { &T_vec_b16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:2", "*stvx", CODE_FOR_sfxii_store, B_UID(857) };
+static const struct builtin B3_vec_stvx = { { &T_vec_b16, &T_int, &T_vec_b16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:3", "*stvx", CODE_FOR_sfxii_store, B_UID(858) };
+static const struct builtin B4_vec_stvx = { { &T_vec_b32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:4", "*stvx", CODE_FOR_sfxii_store, B_UID(859) };
+static const struct builtin B5_vec_stvx = { { &T_vec_b32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:5", "*stvx", CODE_FOR_sfxii_store, B_UID(860) };
+static const struct builtin B6_vec_stvx = { { &T_vec_b32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:6", "*stvx", CODE_FOR_sfxii_store, B_UID(861) };
+static const struct builtin B7_vec_stvx = { { &T_vec_b32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:7", "*stvx", CODE_FOR_sfxii_store, B_UID(862) };
+static const struct builtin B8_vec_stvx = { { &T_vec_b32, &T_int, &T_vec_b32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:8", "*stvx", CODE_FOR_sfxii_store, B_UID(863) };
+static const struct builtin B9_vec_stvx = { { &T_vec_b8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:9", "*stvx", CODE_FOR_sfxii_store, B_UID(864) };
+static const struct builtin B10_vec_stvx = { { &T_vec_b8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:10", "*stvx", CODE_FOR_sfxii_store, B_UID(865) };
+static const struct builtin B11_vec_stvx = { { &T_vec_b8, &T_int, &T_vec_b8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:11", "*stvx", CODE_FOR_sfxii_store, B_UID(866) };
+static const struct builtin B12_vec_stvx = { { &T_vec_f32, &T_int, &T_float_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:12", "*stvx", CODE_FOR_sfxii_store, B_UID(867) };
+static const struct builtin B13_vec_stvx = { { &T_vec_f32, &T_int, &T_vec_f32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:13", "*stvx", CODE_FOR_sfxii_store, B_UID(868) };
+static const struct builtin B14_vec_stvx = { { &T_vec_p16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:14", "*stvx", CODE_FOR_sfxii_store, B_UID(869) };
+static const struct builtin B15_vec_stvx = { { &T_vec_p16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:15", "*stvx", CODE_FOR_sfxii_store, B_UID(870) };
+static const struct builtin B16_vec_stvx = { { &T_vec_p16, &T_int, &T_vec_p16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:16", "*stvx", CODE_FOR_sfxii_store, B_UID(871) };
+static const struct builtin B17_vec_stvx = { { &T_vec_s16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:17", "*stvx", CODE_FOR_sfxii_store, B_UID(872) };
+static const struct builtin B18_vec_stvx = { { &T_vec_s16, &T_int, &T_vec_s16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:18", "*stvx", CODE_FOR_sfxii_store, B_UID(873) };
+static const struct builtin B19_vec_stvx = { { &T_vec_s32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:19", "*stvx", CODE_FOR_sfxii_store, B_UID(874) };
+static const struct builtin B20_vec_stvx = { { &T_vec_s32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:20", "*stvx", CODE_FOR_sfxii_store, B_UID(875) };
+static const struct builtin B21_vec_stvx = { { &T_vec_s32, &T_int, &T_vec_s32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:21", "*stvx", CODE_FOR_sfxii_store, B_UID(876) };
+static const struct builtin B22_vec_stvx = { { &T_vec_s8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:22", "*stvx", CODE_FOR_sfxii_store, B_UID(877) };
+static const struct builtin B23_vec_stvx = { { &T_vec_s8, &T_int, &T_vec_s8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:23", "*stvx", CODE_FOR_sfxii_store, B_UID(878) };
+static const struct builtin B24_vec_stvx = { { &T_vec_u16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:24", "*stvx", CODE_FOR_sfxii_store, B_UID(879) };
+static const struct builtin B25_vec_stvx = { { &T_vec_u16, &T_int, &T_vec_u16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:25", "*stvx", CODE_FOR_sfxii_store, B_UID(880) };
+static const struct builtin B26_vec_stvx = { { &T_vec_u32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:26", "*stvx", CODE_FOR_sfxii_store, B_UID(881) };
+static const struct builtin B27_vec_stvx = { { &T_vec_u32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:27", "*stvx", CODE_FOR_sfxii_store, B_UID(882) };
+static const struct builtin B28_vec_stvx = { { &T_vec_u32, &T_int, &T_vec_u32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:28", "*stvx", CODE_FOR_sfxii_store, B_UID(883) };
+static const struct builtin B29_vec_stvx = { { &T_vec_u8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:29", "*stvx", CODE_FOR_sfxii_store, B_UID(884) };
+static const struct builtin B30_vec_stvx = { { &T_vec_u8, &T_int, &T_vec_u8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvx:30", "*stvx", CODE_FOR_sfxii_store, B_UID(885) };
+static const struct builtin B1_vec_stvebx = { { &T_vec_b16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:1", "*stvebx", CODE_FOR_sfxii_store, B_UID(886) };
+static const struct builtin B2_vec_stvebx = { { &T_vec_b16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:2", "*stvebx", CODE_FOR_sfxii_store, B_UID(887) };
+static const struct builtin B1_vec_stvewx = { { &T_vec_b32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:1", "*stvewx", CODE_FOR_sfxii_store, B_UID(888) };
+static const struct builtin B2_vec_stvewx = { { &T_vec_b32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:2", "*stvewx", CODE_FOR_sfxii_store, B_UID(889) };
+static const struct builtin B3_vec_stvewx = { { &T_vec_b32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:3", "*stvewx", CODE_FOR_sfxii_store, B_UID(890) };
+static const struct builtin B4_vec_stvewx = { { &T_vec_b32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:4", "*stvewx", CODE_FOR_sfxii_store, B_UID(891) };
+static const struct builtin B3_vec_stvebx = { { &T_vec_b8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:3", "*stvebx", CODE_FOR_sfxii_store, B_UID(892) };
+static const struct builtin B4_vec_stvebx = { { &T_vec_b8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:4", "*stvebx", CODE_FOR_sfxii_store, B_UID(893) };
+static const struct builtin B5_vec_stvewx = { { &T_vec_f32, &T_int, &T_float_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:5", "*stvewx", CODE_FOR_sfxii_store, B_UID(894) };
+static const struct builtin B1_vec_stvehx = { { &T_vec_p16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:1", "*stvehx", CODE_FOR_sfxii_store, B_UID(895) };
+static const struct builtin B2_vec_stvehx = { { &T_vec_p16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:2", "*stvehx", CODE_FOR_sfxii_store, B_UID(896) };
+static const struct builtin B3_vec_stvehx = { { &T_vec_s16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:3", "*stvehx", CODE_FOR_sfxii_store, B_UID(897) };
+static const struct builtin B6_vec_stvewx = { { &T_vec_s32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:6", "*stvewx", CODE_FOR_sfxii_store, B_UID(898) };
+static const struct builtin B7_vec_stvewx = { { &T_vec_s32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:7", "*stvewx", CODE_FOR_sfxii_store, B_UID(899) };
+static const struct builtin B5_vec_stvebx = { { &T_vec_s8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:5", "*stvebx", CODE_FOR_sfxii_store, B_UID(900) };
+static const struct builtin B4_vec_stvehx = { { &T_vec_u16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvehx:4", "*stvehx", CODE_FOR_sfxii_store, B_UID(901) };
+static const struct builtin B8_vec_stvewx = { { &T_vec_u32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:8", "*stvewx", CODE_FOR_sfxii_store, B_UID(902) };
+static const struct builtin B9_vec_stvewx = { { &T_vec_u32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvewx:9", "*stvewx", CODE_FOR_sfxii_store, B_UID(903) };
+static const struct builtin B6_vec_stvebx = { { &T_vec_u8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvebx:6", "*stvebx", CODE_FOR_sfxii_store, B_UID(904) };
+static const struct builtin B1_vec_stvxl = { { &T_vec_b16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:1", "*stvxl", CODE_FOR_sfxii_store, B_UID(905) };
+static const struct builtin B2_vec_stvxl = { { &T_vec_b16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:2", "*stvxl", CODE_FOR_sfxii_store, B_UID(906) };
+static const struct builtin B3_vec_stvxl = { { &T_vec_b16, &T_int, &T_vec_b16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:3", "*stvxl", CODE_FOR_sfxii_store, B_UID(907) };
+static const struct builtin B4_vec_stvxl = { { &T_vec_b32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:4", "*stvxl", CODE_FOR_sfxii_store, B_UID(908) };
+static const struct builtin B5_vec_stvxl = { { &T_vec_b32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:5", "*stvxl", CODE_FOR_sfxii_store, B_UID(909) };
+static const struct builtin B6_vec_stvxl = { { &T_vec_b32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:6", "*stvxl", CODE_FOR_sfxii_store, B_UID(910) };
+static const struct builtin B7_vec_stvxl = { { &T_vec_b32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:7", "*stvxl", CODE_FOR_sfxii_store, B_UID(911) };
+static const struct builtin B8_vec_stvxl = { { &T_vec_b32, &T_int, &T_vec_b32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:8", "*stvxl", CODE_FOR_sfxii_store, B_UID(912) };
+static const struct builtin B9_vec_stvxl = { { &T_vec_b8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:9", "*stvxl", CODE_FOR_sfxii_store, B_UID(913) };
+static const struct builtin B10_vec_stvxl = { { &T_vec_b8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:10", "*stvxl", CODE_FOR_sfxii_store, B_UID(914) };
+static const struct builtin B11_vec_stvxl = { { &T_vec_b8, &T_int, &T_vec_b8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:11", "*stvxl", CODE_FOR_sfxii_store, B_UID(915) };
+static const struct builtin B12_vec_stvxl = { { &T_vec_f32, &T_int, &T_float_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:12", "*stvxl", CODE_FOR_sfxii_store, B_UID(916) };
+static const struct builtin B13_vec_stvxl = { { &T_vec_f32, &T_int, &T_vec_f32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:13", "*stvxl", CODE_FOR_sfxii_store, B_UID(917) };
+static const struct builtin B14_vec_stvxl = { { &T_vec_p16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:14", "*stvxl", CODE_FOR_sfxii_store, B_UID(918) };
+static const struct builtin B15_vec_stvxl = { { &T_vec_p16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:15", "*stvxl", CODE_FOR_sfxii_store, B_UID(919) };
+static const struct builtin B16_vec_stvxl = { { &T_vec_p16, &T_int, &T_vec_p16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:16", "*stvxl", CODE_FOR_sfxii_store, B_UID(920) };
+static const struct builtin B17_vec_stvxl = { { &T_vec_s16, &T_int, &T_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:17", "*stvxl", CODE_FOR_sfxii_store, B_UID(921) };
+static const struct builtin B18_vec_stvxl = { { &T_vec_s16, &T_int, &T_vec_s16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:18", "*stvxl", CODE_FOR_sfxii_store, B_UID(922) };
+static const struct builtin B19_vec_stvxl = { { &T_vec_s32, &T_int, &T_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:19", "*stvxl", CODE_FOR_sfxii_store, B_UID(923) };
+static const struct builtin B20_vec_stvxl = { { &T_vec_s32, &T_int, &T_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:20", "*stvxl", CODE_FOR_sfxii_store, B_UID(924) };
+static const struct builtin B21_vec_stvxl = { { &T_vec_s32, &T_int, &T_vec_s32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:21", "*stvxl", CODE_FOR_sfxii_store, B_UID(925) };
+static const struct builtin B22_vec_stvxl = { { &T_vec_s8, &T_int, &T_signed_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:22", "*stvxl", CODE_FOR_sfxii_store, B_UID(926) };
+static const struct builtin B23_vec_stvxl = { { &T_vec_s8, &T_int, &T_vec_s8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:23", "*stvxl", CODE_FOR_sfxii_store, B_UID(927) };
+static const struct builtin B24_vec_stvxl = { { &T_vec_u16, &T_int, &T_unsigned_short_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:24", "*stvxl", CODE_FOR_sfxii_store, B_UID(928) };
+static const struct builtin B25_vec_stvxl = { { &T_vec_u16, &T_int, &T_vec_u16_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:25", "*stvxl", CODE_FOR_sfxii_store, B_UID(929) };
+static const struct builtin B26_vec_stvxl = { { &T_vec_u32, &T_int, &T_unsigned_int_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:26", "*stvxl", CODE_FOR_sfxii_store, B_UID(930) };
+static const struct builtin B27_vec_stvxl = { { &T_vec_u32, &T_int, &T_unsigned_long_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:27", "*stvxl", CODE_FOR_sfxii_store, B_UID(931) };
+static const struct builtin B28_vec_stvxl = { { &T_vec_u32, &T_int, &T_vec_u32_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:28", "*stvxl", CODE_FOR_sfxii_store, B_UID(932) };
+static const struct builtin B29_vec_stvxl = { { &T_vec_u8, &T_int, &T_unsigned_char_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:29", "*stvxl", CODE_FOR_sfxii_store, B_UID(933) };
+static const struct builtin B30_vec_stvxl = { { &T_vec_u8, &T_int, &T_vec_u8_ptr, }, "xii", &T_void, 3, FALSE, FALSE, 0, "vec_stvxl:30", "*stvxl", CODE_FOR_sfxii_store, B_UID(934) };
+static const struct builtin B1_vec_vsubuhm = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubuhm:1", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(935) };
+static const struct builtin B2_vec_vsubuhm = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhm:2", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(936) };
+static const struct builtin B1_vec_vsubuwm = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubuwm:1", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(937) };
+static const struct builtin B2_vec_vsubuwm = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuwm:2", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(938) };
+static const struct builtin B1_vec_vsububm = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsububm:1", "*vsububm", CODE_FOR_xfxx_simple, B_UID(939) };
+static const struct builtin B2_vec_vsububm = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububm:2", "*vsububm", CODE_FOR_xfxx_simple, B_UID(940) };
+static const struct builtin B_vec_vsubfp = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vsubfp", "*vsubfp", CODE_FOR_xfxx_fp, B_UID(941) };
+static const struct builtin B3_vec_vsubuhm = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubuhm:3", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(942) };
+static const struct builtin B4_vec_vsubuhm = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubuhm:4", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(943) };
+static const struct builtin B3_vec_vsubuwm = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubuwm:3", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(944) };
+static const struct builtin B4_vec_vsubuwm = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubuwm:4", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(945) };
+static const struct builtin B3_vec_vsububm = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsububm:3", "*vsububm", CODE_FOR_xfxx_simple, B_UID(946) };
+static const struct builtin B4_vec_vsububm = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsububm:4", "*vsububm", CODE_FOR_xfxx_simple, B_UID(947) };
+static const struct builtin B5_vec_vsubuhm = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhm:5", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(948) };
+static const struct builtin B6_vec_vsubuhm = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhm:6", "*vsubuhm", CODE_FOR_xfxx_simple, B_UID(949) };
+static const struct builtin B5_vec_vsubuwm = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuwm:5", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(950) };
+static const struct builtin B6_vec_vsubuwm = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuwm:6", "*vsubuwm", CODE_FOR_xfxx_simple, B_UID(951) };
+static const struct builtin B5_vec_vsububm = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububm:5", "*vsububm", CODE_FOR_xfxx_simple, B_UID(952) };
+static const struct builtin B6_vec_vsububm = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububm:6", "*vsububm", CODE_FOR_xfxx_simple, B_UID(953) };
+static const struct builtin B_vec_vsubcuw = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsubcuw", "*vsubcuw", CODE_FOR_xfxx_simple, B_UID(954) };
+static const struct builtin B1_vec_vsubshs = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubshs:1", "*vsubshs", CODE_FOR_xfxx_simple, B_UID(955) };
+static const struct builtin B1_vec_vsubuhs = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhs:1", "*vsubuhs", CODE_FOR_xfxx_simple, B_UID(956) };
+static const struct builtin B1_vec_vsubsws = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubsws:1", "*vsubsws", CODE_FOR_xfxx_simple, B_UID(957) };
+static const struct builtin B1_vec_vsubuws = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuws:1", "*vsubuws", CODE_FOR_xfxx_simple, B_UID(958) };
+static const struct builtin B1_vec_vsubsbs = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsubsbs:1", "*vsubsbs", CODE_FOR_xfxx_simple, B_UID(959) };
+static const struct builtin B1_vec_vsububs = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububs:1", "*vsububs", CODE_FOR_xfxx_simple, B_UID(960) };
+static const struct builtin B2_vec_vsubshs = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubshs:2", "*vsubshs", CODE_FOR_xfxx_simple, B_UID(961) };
+static const struct builtin B3_vec_vsubshs = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vsubshs:3", "*vsubshs", CODE_FOR_xfxx_simple, B_UID(962) };
+static const struct builtin B2_vec_vsubsws = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubsws:2", "*vsubsws", CODE_FOR_xfxx_simple, B_UID(963) };
+static const struct builtin B3_vec_vsubsws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vsubsws:3", "*vsubsws", CODE_FOR_xfxx_simple, B_UID(964) };
+static const struct builtin B2_vec_vsubsbs = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsubsbs:2", "*vsubsbs", CODE_FOR_xfxx_simple, B_UID(965) };
+static const struct builtin B3_vec_vsubsbs = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vsubsbs:3", "*vsubsbs", CODE_FOR_xfxx_simple, B_UID(966) };
+static const struct builtin B2_vec_vsubuhs = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhs:2", "*vsubuhs", CODE_FOR_xfxx_simple, B_UID(967) };
+static const struct builtin B3_vec_vsubuhs = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vsubuhs:3", "*vsubuhs", CODE_FOR_xfxx_simple, B_UID(968) };
+static const struct builtin B2_vec_vsubuws = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuws:2", "*vsubuws", CODE_FOR_xfxx_simple, B_UID(969) };
+static const struct builtin B3_vec_vsubuws = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vsubuws:3", "*vsubuws", CODE_FOR_xfxx_simple, B_UID(970) };
+static const struct builtin B2_vec_vsububs = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububs:2", "*vsububs", CODE_FOR_xfxx_simple, B_UID(971) };
+static const struct builtin B3_vec_vsububs = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vsububs:3", "*vsububs", CODE_FOR_xfxx_simple, B_UID(972) };
+static const struct builtin B_vec_vsum2sws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsum2sws", "*vsum2sws", CODE_FOR_xfxx_complex, B_UID(973) };
+static const struct builtin B_vec_vsum4shs = { { &T_vec_s16, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsum4shs", "*vsum4shs", CODE_FOR_xfxx_complex, B_UID(974) };
+static const struct builtin B_vec_vsum4sbs = { { &T_vec_s8, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsum4sbs", "*vsum4sbs", CODE_FOR_xfxx_complex, B_UID(975) };
+static const struct builtin B_vec_vsum4ubs = { { &T_vec_u8, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_vsum4ubs", "*vsum4ubs", CODE_FOR_xfxx_complex, B_UID(976) };
+static const struct builtin B_vec_vsumsws = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_vsumsws", "*vsumsws", CODE_FOR_xfxx_complex, B_UID(977) };
+static const struct builtin B_vec_vrfiz = { { &T_vec_f32, NULL, NULL, }, "x", &T_vec_f32, 1, FALSE, FALSE, 0, "vec_vrfiz", "*vrfiz", CODE_FOR_xfx_fp, B_UID(978) };
+static const struct builtin B1_vec_unpack2sh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_unpack2sh:1", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(979) };
+static const struct builtin B2_vec_unpack2sh = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_unpack2sh:2", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(980) };
+static const struct builtin B1_vec_unpack2sl = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 0, "vec_unpack2sl:1", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(981) };
+static const struct builtin B2_vec_unpack2sl = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 0, "vec_unpack2sl:2", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(982) };
+static const struct builtin B1_vec_unpack2uh = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_unpack2uh:1", "*vmrghh", CODE_FOR_xfxx_perm, B_UID(983) };
+static const struct builtin B2_vec_unpack2uh = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_unpack2uh:2", "*vmrghb", CODE_FOR_xfxx_perm, B_UID(984) };
+static const struct builtin B1_vec_unpack2ul = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 0, "vec_unpack2ul:1", "*vmrglh", CODE_FOR_xfxx_perm, B_UID(985) };
+static const struct builtin B2_vec_unpack2ul = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 0, "vec_unpack2ul:2", "*vmrglb", CODE_FOR_xfxx_perm, B_UID(986) };
+static const struct builtin B1_vec_vupkhsh = { { &T_vec_b16, NULL, NULL, }, "x", &T_vec_b32, 1, FALSE, FALSE, 0, "vec_vupkhsh:1", "*vupkhsh", CODE_FOR_xfx_perm, B_UID(987) };
+static const struct builtin B1_vec_vupkhsb = { { &T_vec_b8, NULL, NULL, }, "x", &T_vec_b16, 1, FALSE, FALSE, 0, "vec_vupkhsb:1", "*vupkhsb", CODE_FOR_xfx_perm, B_UID(988) };
+static const struct builtin B_vec_vupkhpx = { { &T_vec_p16, NULL, NULL, }, "x", &T_vec_u32, 1, FALSE, FALSE, 0, "vec_vupkhpx", "*vupkhpx", CODE_FOR_xfx_perm, B_UID(989) };
+static const struct builtin B2_vec_vupkhsh = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 0, "vec_vupkhsh:2", "*vupkhsh", CODE_FOR_xfx_perm, B_UID(990) };
+static const struct builtin B2_vec_vupkhsb = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 0, "vec_vupkhsb:2", "*vupkhsb", CODE_FOR_xfx_perm, B_UID(991) };
+static const struct builtin B1_vec_vupklsh = { { &T_vec_b16, NULL, NULL, }, "x", &T_vec_b32, 1, FALSE, FALSE, 0, "vec_vupklsh:1", "*vupklsh", CODE_FOR_xfx_perm, B_UID(992) };
+static const struct builtin B1_vec_vupklsb = { { &T_vec_b8, NULL, NULL, }, "x", &T_vec_b16, 1, FALSE, FALSE, 0, "vec_vupklsb:1", "*vupklsb", CODE_FOR_xfx_perm, B_UID(993) };
+static const struct builtin B_vec_vupklpx = { { &T_vec_p16, NULL, NULL, }, "x", &T_vec_u32, 1, FALSE, FALSE, 0, "vec_vupklpx", "*vupklpx", CODE_FOR_xfx_perm, B_UID(994) };
+static const struct builtin B2_vec_vupklsh = { { &T_vec_s16, NULL, NULL, }, "x", &T_vec_s32, 1, FALSE, FALSE, 0, "vec_vupklsh:2", "*vupklsh", CODE_FOR_xfx_perm, B_UID(995) };
+static const struct builtin B2_vec_vupklsb = { { &T_vec_s8, NULL, NULL, }, "x", &T_vec_s16, 1, FALSE, FALSE, 0, "vec_vupklsb:2", "*vupklsb", CODE_FOR_xfx_perm, B_UID(996) };
+static const struct builtin B1_vec_vxor = { { &T_vec_b16, &T_vec_b16, NULL, }, "xx", &T_vec_b16, 2, FALSE, FALSE, 1, "vec_vxor:1", "*vxor", CODE_FOR_xfxx_simple, B_UID(997) };
+static const struct builtin B2_vec_vxor = { { &T_vec_b16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vxor:2", "*vxor", CODE_FOR_xfxx_simple, B_UID(998) };
+static const struct builtin B3_vec_vxor = { { &T_vec_b16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vxor:3", "*vxor", CODE_FOR_xfxx_simple, B_UID(999) };
+static const struct builtin B4_vec_vxor = { { &T_vec_b32, &T_vec_b32, NULL, }, "xx", &T_vec_b32, 2, FALSE, FALSE, 1, "vec_vxor:4", "*vxor", CODE_FOR_xfxx_simple, B_UID(1000) };
+static const struct builtin B5_vec_vxor = { { &T_vec_b32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vxor:5", "*vxor", CODE_FOR_xfxx_simple, B_UID(1001) };
+static const struct builtin B6_vec_vxor = { { &T_vec_b32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vxor:6", "*vxor", CODE_FOR_xfxx_simple, B_UID(1002) };
+static const struct builtin B7_vec_vxor = { { &T_vec_b32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vxor:7", "*vxor", CODE_FOR_xfxx_simple, B_UID(1003) };
+static const struct builtin B8_vec_vxor = { { &T_vec_b8, &T_vec_b8, NULL, }, "xx", &T_vec_b8, 2, FALSE, FALSE, 1, "vec_vxor:8", "*vxor", CODE_FOR_xfxx_simple, B_UID(1004) };
+static const struct builtin B9_vec_vxor = { { &T_vec_b8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vxor:9", "*vxor", CODE_FOR_xfxx_simple, B_UID(1005) };
+static const struct builtin B10_vec_vxor = { { &T_vec_b8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vxor:10", "*vxor", CODE_FOR_xfxx_simple, B_UID(1006) };
+static const struct builtin B11_vec_vxor = { { &T_vec_f32, &T_vec_b32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vxor:11", "*vxor", CODE_FOR_xfxx_simple, B_UID(1007) };
+static const struct builtin B12_vec_vxor = { { &T_vec_f32, &T_vec_f32, NULL, }, "xx", &T_vec_f32, 2, FALSE, FALSE, 1, "vec_vxor:12", "*vxor", CODE_FOR_xfxx_simple, B_UID(1008) };
+static const struct builtin B13_vec_vxor = { { &T_vec_s16, &T_vec_b16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vxor:13", "*vxor", CODE_FOR_xfxx_simple, B_UID(1009) };
+static const struct builtin B14_vec_vxor = { { &T_vec_s16, &T_vec_s16, NULL, }, "xx", &T_vec_s16, 2, FALSE, FALSE, 1, "vec_vxor:14", "*vxor", CODE_FOR_xfxx_simple, B_UID(1010) };
+static const struct builtin B15_vec_vxor = { { &T_vec_s32, &T_vec_b32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vxor:15", "*vxor", CODE_FOR_xfxx_simple, B_UID(1011) };
+static const struct builtin B16_vec_vxor = { { &T_vec_s32, &T_vec_s32, NULL, }, "xx", &T_vec_s32, 2, FALSE, FALSE, 1, "vec_vxor:16", "*vxor", CODE_FOR_xfxx_simple, B_UID(1012) };
+static const struct builtin B17_vec_vxor = { { &T_vec_s8, &T_vec_b8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vxor:17", "*vxor", CODE_FOR_xfxx_simple, B_UID(1013) };
+static const struct builtin B18_vec_vxor = { { &T_vec_s8, &T_vec_s8, NULL, }, "xx", &T_vec_s8, 2, FALSE, FALSE, 1, "vec_vxor:18", "*vxor", CODE_FOR_xfxx_simple, B_UID(1014) };
+static const struct builtin B19_vec_vxor = { { &T_vec_u16, &T_vec_b16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vxor:19", "*vxor", CODE_FOR_xfxx_simple, B_UID(1015) };
+static const struct builtin B20_vec_vxor = { { &T_vec_u16, &T_vec_u16, NULL, }, "xx", &T_vec_u16, 2, FALSE, FALSE, 1, "vec_vxor:20", "*vxor", CODE_FOR_xfxx_simple, B_UID(1016) };
+static const struct builtin B21_vec_vxor = { { &T_vec_u32, &T_vec_b32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vxor:21", "*vxor", CODE_FOR_xfxx_simple, B_UID(1017) };
+static const struct builtin B22_vec_vxor = { { &T_vec_u32, &T_vec_u32, NULL, }, "xx", &T_vec_u32, 2, FALSE, FALSE, 1, "vec_vxor:22", "*vxor", CODE_FOR_xfxx_simple, B_UID(1018) };
+static const struct builtin B23_vec_vxor = { { &T_vec_u8, &T_vec_b8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vxor:23", "*vxor", CODE_FOR_xfxx_simple, B_UID(1019) };
+static const struct builtin B24_vec_vxor = { { &T_vec_u8, &T_vec_u8, NULL, }, "xx", &T_vec_u8, 2, FALSE, FALSE, 1, "vec_vxor:24", "*vxor", CODE_FOR_xfxx_simple, B_UID(1020) };
+#define LAST_B_UID B_UID(1021)
+
+const struct builtin * const Builtin[] = {
+ &B1_vec_abs,
+ &B2_vec_abs,
+ &B3_vec_abs,
+ &B4_vec_abs,
+ &B1_vec_abss,
+ &B2_vec_abss,
+ &B3_vec_abss,
+ &B1_vec_vadduhm,
+ &B2_vec_vadduhm,
+ &B1_vec_vadduwm,
+ &B2_vec_vadduwm,
+ &B1_vec_vaddubm,
+ &B2_vec_vaddubm,
+ &B_vec_vaddfp,
+ &B3_vec_vadduhm,
+ &B4_vec_vadduhm,
+ &B3_vec_vadduwm,
+ &B4_vec_vadduwm,
+ &B3_vec_vaddubm,
+ &B4_vec_vaddubm,
+ &B5_vec_vadduhm,
+ &B6_vec_vadduhm,
+ &B5_vec_vadduwm,
+ &B6_vec_vadduwm,
+ &B5_vec_vaddubm,
+ &B6_vec_vaddubm,
+ &B_vec_vaddcuw,
+ &B1_vec_vaddshs,
+ &B1_vec_vadduhs,
+ &B1_vec_vaddsws,
+ &B1_vec_vadduws,
+ &B1_vec_vaddsbs,
+ &B1_vec_vaddubs,
+ &B2_vec_vaddshs,
+ &B3_vec_vaddshs,
+ &B2_vec_vaddsws,
+ &B3_vec_vaddsws,
+ &B2_vec_vaddsbs,
+ &B3_vec_vaddsbs,
+ &B2_vec_vadduhs,
+ &B3_vec_vadduhs,
+ &B2_vec_vadduws,
+ &B3_vec_vadduws,
+ &B2_vec_vaddubs,
+ &B3_vec_vaddubs,
+ &B1_vec_all_eq,
+ &B2_vec_all_eq,
+ &B3_vec_all_eq,
+ &B4_vec_all_eq,
+ &B5_vec_all_eq,
+ &B6_vec_all_eq,
+ &B7_vec_all_eq,
+ &B8_vec_all_eq,
+ &B9_vec_all_eq,
+ &B10_vec_all_eq,
+ &B11_vec_all_eq,
+ &B12_vec_all_eq,
+ &B13_vec_all_eq,
+ &B14_vec_all_eq,
+ &B15_vec_all_eq,
+ &B16_vec_all_eq,
+ &B17_vec_all_eq,
+ &B18_vec_all_eq,
+ &B19_vec_all_eq,
+ &B20_vec_all_eq,
+ &B21_vec_all_eq,
+ &B22_vec_all_eq,
+ &B23_vec_all_eq,
+ &B1_vec_all_ge,
+ &B2_vec_all_ge,
+ &B3_vec_all_ge,
+ &B4_vec_all_ge,
+ &B5_vec_all_ge,
+ &B6_vec_all_ge,
+ &B7_vec_all_ge,
+ &B8_vec_all_ge,
+ &B9_vec_all_ge,
+ &B10_vec_all_ge,
+ &B11_vec_all_ge,
+ &B12_vec_all_ge,
+ &B13_vec_all_ge,
+ &B14_vec_all_ge,
+ &B15_vec_all_ge,
+ &B16_vec_all_ge,
+ &B17_vec_all_ge,
+ &B18_vec_all_ge,
+ &B19_vec_all_ge,
+ &B1_vec_all_gt,
+ &B2_vec_all_gt,
+ &B3_vec_all_gt,
+ &B4_vec_all_gt,
+ &B5_vec_all_gt,
+ &B6_vec_all_gt,
+ &B7_vec_all_gt,
+ &B8_vec_all_gt,
+ &B9_vec_all_gt,
+ &B10_vec_all_gt,
+ &B11_vec_all_gt,
+ &B12_vec_all_gt,
+ &B13_vec_all_gt,
+ &B14_vec_all_gt,
+ &B15_vec_all_gt,
+ &B16_vec_all_gt,
+ &B17_vec_all_gt,
+ &B18_vec_all_gt,
+ &B19_vec_all_gt,
+ &B_vec_all_in,
+ &B1_vec_all_le,
+ &B2_vec_all_le,
+ &B3_vec_all_le,
+ &B4_vec_all_le,
+ &B5_vec_all_le,
+ &B6_vec_all_le,
+ &B7_vec_all_le,
+ &B8_vec_all_le,
+ &B9_vec_all_le,
+ &B10_vec_all_le,
+ &B11_vec_all_le,
+ &B12_vec_all_le,
+ &B13_vec_all_le,
+ &B14_vec_all_le,
+ &B15_vec_all_le,
+ &B16_vec_all_le,
+ &B17_vec_all_le,
+ &B18_vec_all_le,
+ &B19_vec_all_le,
+ &B1_vec_all_lt,
+ &B2_vec_all_lt,
+ &B3_vec_all_lt,
+ &B4_vec_all_lt,
+ &B5_vec_all_lt,
+ &B6_vec_all_lt,
+ &B7_vec_all_lt,
+ &B8_vec_all_lt,
+ &B9_vec_all_lt,
+ &B10_vec_all_lt,
+ &B11_vec_all_lt,
+ &B12_vec_all_lt,
+ &B13_vec_all_lt,
+ &B14_vec_all_lt,
+ &B15_vec_all_lt,
+ &B16_vec_all_lt,
+ &B17_vec_all_lt,
+ &B18_vec_all_lt,
+ &B19_vec_all_lt,
+ &B_vec_all_nan,
+ &B1_vec_all_ne,
+ &B2_vec_all_ne,
+ &B3_vec_all_ne,
+ &B4_vec_all_ne,
+ &B5_vec_all_ne,
+ &B6_vec_all_ne,
+ &B7_vec_all_ne,
+ &B8_vec_all_ne,
+ &B9_vec_all_ne,
+ &B10_vec_all_ne,
+ &B11_vec_all_ne,
+ &B12_vec_all_ne,
+ &B13_vec_all_ne,
+ &B14_vec_all_ne,
+ &B15_vec_all_ne,
+ &B16_vec_all_ne,
+ &B17_vec_all_ne,
+ &B18_vec_all_ne,
+ &B19_vec_all_ne,
+ &B20_vec_all_ne,
+ &B21_vec_all_ne,
+ &B22_vec_all_ne,
+ &B23_vec_all_ne,
+ &B_vec_all_nge,
+ &B_vec_all_ngt,
+ &B_vec_all_nle,
+ &B_vec_all_nlt,
+ &B_vec_all_numeric,
+ &B1_vec_vand,
+ &B2_vec_vand,
+ &B3_vec_vand,
+ &B4_vec_vand,
+ &B5_vec_vand,
+ &B6_vec_vand,
+ &B7_vec_vand,
+ &B8_vec_vand,
+ &B9_vec_vand,
+ &B10_vec_vand,
+ &B11_vec_vand,
+ &B12_vec_vand,
+ &B13_vec_vand,
+ &B14_vec_vand,
+ &B15_vec_vand,
+ &B16_vec_vand,
+ &B17_vec_vand,
+ &B18_vec_vand,
+ &B19_vec_vand,
+ &B20_vec_vand,
+ &B21_vec_vand,
+ &B22_vec_vand,
+ &B23_vec_vand,
+ &B24_vec_vand,
+ &B1_vec_vandc,
+ &B2_vec_vandc,
+ &B3_vec_vandc,
+ &B4_vec_vandc,
+ &B5_vec_vandc,
+ &B6_vec_vandc,
+ &B7_vec_vandc,
+ &B8_vec_vandc,
+ &B9_vec_vandc,
+ &B10_vec_vandc,
+ &B11_vec_vandc,
+ &B12_vec_vandc,
+ &B13_vec_vandc,
+ &B14_vec_vandc,
+ &B15_vec_vandc,
+ &B16_vec_vandc,
+ &B17_vec_vandc,
+ &B18_vec_vandc,
+ &B19_vec_vandc,
+ &B20_vec_vandc,
+ &B21_vec_vandc,
+ &B22_vec_vandc,
+ &B23_vec_vandc,
+ &B24_vec_vandc,
+ &B1_vec_any_eq,
+ &B2_vec_any_eq,
+ &B3_vec_any_eq,
+ &B4_vec_any_eq,
+ &B5_vec_any_eq,
+ &B6_vec_any_eq,
+ &B7_vec_any_eq,
+ &B8_vec_any_eq,
+ &B9_vec_any_eq,
+ &B10_vec_any_eq,
+ &B11_vec_any_eq,
+ &B12_vec_any_eq,
+ &B13_vec_any_eq,
+ &B14_vec_any_eq,
+ &B15_vec_any_eq,
+ &B16_vec_any_eq,
+ &B17_vec_any_eq,
+ &B18_vec_any_eq,
+ &B19_vec_any_eq,
+ &B20_vec_any_eq,
+ &B21_vec_any_eq,
+ &B22_vec_any_eq,
+ &B23_vec_any_eq,
+ &B1_vec_any_ge,
+ &B2_vec_any_ge,
+ &B3_vec_any_ge,
+ &B4_vec_any_ge,
+ &B5_vec_any_ge,
+ &B6_vec_any_ge,
+ &B7_vec_any_ge,
+ &B8_vec_any_ge,
+ &B9_vec_any_ge,
+ &B10_vec_any_ge,
+ &B11_vec_any_ge,
+ &B12_vec_any_ge,
+ &B13_vec_any_ge,
+ &B14_vec_any_ge,
+ &B15_vec_any_ge,
+ &B16_vec_any_ge,
+ &B17_vec_any_ge,
+ &B18_vec_any_ge,
+ &B19_vec_any_ge,
+ &B1_vec_any_gt,
+ &B2_vec_any_gt,
+ &B3_vec_any_gt,
+ &B4_vec_any_gt,
+ &B5_vec_any_gt,
+ &B6_vec_any_gt,
+ &B7_vec_any_gt,
+ &B8_vec_any_gt,
+ &B9_vec_any_gt,
+ &B10_vec_any_gt,
+ &B11_vec_any_gt,
+ &B12_vec_any_gt,
+ &B13_vec_any_gt,
+ &B14_vec_any_gt,
+ &B15_vec_any_gt,
+ &B16_vec_any_gt,
+ &B17_vec_any_gt,
+ &B18_vec_any_gt,
+ &B19_vec_any_gt,
+ &B1_vec_any_le,
+ &B2_vec_any_le,
+ &B3_vec_any_le,
+ &B4_vec_any_le,
+ &B5_vec_any_le,
+ &B6_vec_any_le,
+ &B7_vec_any_le,
+ &B8_vec_any_le,
+ &B9_vec_any_le,
+ &B10_vec_any_le,
+ &B11_vec_any_le,
+ &B12_vec_any_le,
+ &B13_vec_any_le,
+ &B14_vec_any_le,
+ &B15_vec_any_le,
+ &B16_vec_any_le,
+ &B17_vec_any_le,
+ &B18_vec_any_le,
+ &B19_vec_any_le,
+ &B1_vec_any_lt,
+ &B2_vec_any_lt,
+ &B3_vec_any_lt,
+ &B4_vec_any_lt,
+ &B5_vec_any_lt,
+ &B6_vec_any_lt,
+ &B7_vec_any_lt,
+ &B8_vec_any_lt,
+ &B9_vec_any_lt,
+ &B10_vec_any_lt,
+ &B11_vec_any_lt,
+ &B12_vec_any_lt,
+ &B13_vec_any_lt,
+ &B14_vec_any_lt,
+ &B15_vec_any_lt,
+ &B16_vec_any_lt,
+ &B17_vec_any_lt,
+ &B18_vec_any_lt,
+ &B19_vec_any_lt,
+ &B_vec_any_nan,
+ &B1_vec_any_ne,
+ &B2_vec_any_ne,
+ &B3_vec_any_ne,
+ &B4_vec_any_ne,
+ &B5_vec_any_ne,
+ &B6_vec_any_ne,
+ &B7_vec_any_ne,
+ &B8_vec_any_ne,
+ &B9_vec_any_ne,
+ &B10_vec_any_ne,
+ &B11_vec_any_ne,
+ &B12_vec_any_ne,
+ &B13_vec_any_ne,
+ &B14_vec_any_ne,
+ &B15_vec_any_ne,
+ &B16_vec_any_ne,
+ &B17_vec_any_ne,
+ &B18_vec_any_ne,
+ &B19_vec_any_ne,
+ &B20_vec_any_ne,
+ &B21_vec_any_ne,
+ &B22_vec_any_ne,
+ &B23_vec_any_ne,
+ &B_vec_any_nge,
+ &B_vec_any_ngt,
+ &B_vec_any_nle,
+ &B_vec_any_nlt,
+ &B_vec_any_numeric,
+ &B_vec_any_out,
+ &B_vec_vavgsh,
+ &B_vec_vavgsw,
+ &B_vec_vavgsb,
+ &B_vec_vavguh,
+ &B_vec_vavguw,
+ &B_vec_vavgub,
+ &B_vec_vrfip,
+ &B_vec_vcmpbfp,
+ &B_vec_vcmpeqfp,
+ &B1_vec_vcmpequh,
+ &B1_vec_vcmpequw,
+ &B1_vec_vcmpequb,
+ &B2_vec_vcmpequh,
+ &B2_vec_vcmpequw,
+ &B2_vec_vcmpequb,
+ &B_vec_vcmpgefp,
+ &B_vec_vcmpgtfp,
+ &B_vec_vcmpgtsh,
+ &B_vec_vcmpgtsw,
+ &B_vec_vcmpgtsb,
+ &B_vec_vcmpgtuh,
+ &B_vec_vcmpgtuw,
+ &B_vec_vcmpgtub,
+ &B_vec_cmple,
+ &B1_vec_cmplt,
+ &B2_vec_cmplt,
+ &B3_vec_cmplt,
+ &B4_vec_cmplt,
+ &B5_vec_cmplt,
+ &B6_vec_cmplt,
+ &B7_vec_cmplt,
+ &B_vec_vcfsx,
+ &B_vec_vcfux,
+ &B_vec_vctsxs,
+ &B_vec_vctuxs,
+ &B_vec_dss,
+ &B_vec_dssall,
+ &B1_vec_dst,
+ &B2_vec_dst,
+ &B3_vec_dst,
+ &B4_vec_dst,
+ &B5_vec_dst,
+ &B6_vec_dst,
+ &B7_vec_dst,
+ &B8_vec_dst,
+ &B9_vec_dst,
+ &B10_vec_dst,
+ &B11_vec_dst,
+ &B12_vec_dst,
+ &B13_vec_dst,
+ &B14_vec_dst,
+ &B15_vec_dst,
+ &B16_vec_dst,
+ &B17_vec_dst,
+ &B18_vec_dst,
+ &B19_vec_dst,
+ &B20_vec_dst,
+ &B1_vec_dstst,
+ &B2_vec_dstst,
+ &B3_vec_dstst,
+ &B4_vec_dstst,
+ &B5_vec_dstst,
+ &B6_vec_dstst,
+ &B7_vec_dstst,
+ &B8_vec_dstst,
+ &B9_vec_dstst,
+ &B10_vec_dstst,
+ &B11_vec_dstst,
+ &B12_vec_dstst,
+ &B13_vec_dstst,
+ &B14_vec_dstst,
+ &B15_vec_dstst,
+ &B16_vec_dstst,
+ &B17_vec_dstst,
+ &B18_vec_dstst,
+ &B19_vec_dstst,
+ &B20_vec_dstst,
+ &B1_vec_dststt,
+ &B2_vec_dststt,
+ &B3_vec_dststt,
+ &B4_vec_dststt,
+ &B5_vec_dststt,
+ &B6_vec_dststt,
+ &B7_vec_dststt,
+ &B8_vec_dststt,
+ &B9_vec_dststt,
+ &B10_vec_dststt,
+ &B11_vec_dststt,
+ &B12_vec_dststt,
+ &B13_vec_dststt,
+ &B14_vec_dststt,
+ &B15_vec_dststt,
+ &B16_vec_dststt,
+ &B17_vec_dststt,
+ &B18_vec_dststt,
+ &B19_vec_dststt,
+ &B20_vec_dststt,
+ &B1_vec_dstt,
+ &B2_vec_dstt,
+ &B3_vec_dstt,
+ &B4_vec_dstt,
+ &B5_vec_dstt,
+ &B6_vec_dstt,
+ &B7_vec_dstt,
+ &B8_vec_dstt,
+ &B9_vec_dstt,
+ &B10_vec_dstt,
+ &B11_vec_dstt,
+ &B12_vec_dstt,
+ &B13_vec_dstt,
+ &B14_vec_dstt,
+ &B15_vec_dstt,
+ &B16_vec_dstt,
+ &B17_vec_dstt,
+ &B18_vec_dstt,
+ &B19_vec_dstt,
+ &B20_vec_dstt,
+ &B_vec_vexptefp,
+ &B_vec_vrfim,
+ &B1_vec_lvx,
+ &B2_vec_lvx,
+ &B3_vec_lvx,
+ &B4_vec_lvx,
+ &B5_vec_lvx,
+ &B6_vec_lvx,
+ &B7_vec_lvx,
+ &B8_vec_lvx,
+ &B9_vec_lvx,
+ &B10_vec_lvx,
+ &B11_vec_lvx,
+ &B12_vec_lvx,
+ &B13_vec_lvx,
+ &B14_vec_lvx,
+ &B15_vec_lvx,
+ &B16_vec_lvx,
+ &B17_vec_lvx,
+ &B18_vec_lvx,
+ &B19_vec_lvx,
+ &B20_vec_lvx,
+ &B1_vec_lvewx,
+ &B2_vec_lvewx,
+ &B3_vec_lvewx,
+ &B1_vec_lvehx,
+ &B1_vec_lvebx,
+ &B2_vec_lvebx,
+ &B4_vec_lvewx,
+ &B5_vec_lvewx,
+ &B2_vec_lvehx,
+ &B1_vec_lvxl,
+ &B2_vec_lvxl,
+ &B3_vec_lvxl,
+ &B4_vec_lvxl,
+ &B5_vec_lvxl,
+ &B6_vec_lvxl,
+ &B7_vec_lvxl,
+ &B8_vec_lvxl,
+ &B9_vec_lvxl,
+ &B10_vec_lvxl,
+ &B11_vec_lvxl,
+ &B12_vec_lvxl,
+ &B13_vec_lvxl,
+ &B14_vec_lvxl,
+ &B15_vec_lvxl,
+ &B16_vec_lvxl,
+ &B17_vec_lvxl,
+ &B18_vec_lvxl,
+ &B19_vec_lvxl,
+ &B20_vec_lvxl,
+ &B_vec_vlogefp,
+ &B1_vec_lvsl,
+ &B2_vec_lvsl,
+ &B3_vec_lvsl,
+ &B4_vec_lvsl,
+ &B5_vec_lvsl,
+ &B6_vec_lvsl,
+ &B7_vec_lvsl,
+ &B8_vec_lvsl,
+ &B9_vec_lvsl,
+ &B1_vec_lvsr,
+ &B2_vec_lvsr,
+ &B3_vec_lvsr,
+ &B4_vec_lvsr,
+ &B5_vec_lvsr,
+ &B6_vec_lvsr,
+ &B7_vec_lvsr,
+ &B8_vec_lvsr,
+ &B9_vec_lvsr,
+ &B_vec_vmaddfp,
+ &B_vec_vmhaddshs,
+ &B1_vec_vmaxsh,
+ &B1_vec_vmaxuh,
+ &B1_vec_vmaxsw,
+ &B1_vec_vmaxuw,
+ &B1_vec_vmaxsb,
+ &B1_vec_vmaxub,
+ &B_vec_vmaxfp,
+ &B2_vec_vmaxsh,
+ &B3_vec_vmaxsh,
+ &B2_vec_vmaxsw,
+ &B3_vec_vmaxsw,
+ &B2_vec_vmaxsb,
+ &B3_vec_vmaxsb,
+ &B2_vec_vmaxuh,
+ &B3_vec_vmaxuh,
+ &B2_vec_vmaxuw,
+ &B3_vec_vmaxuw,
+ &B2_vec_vmaxub,
+ &B3_vec_vmaxub,
+ &B1_vec_vmrghh,
+ &B1_vec_vmrghw,
+ &B1_vec_vmrghb,
+ &B2_vec_vmrghw,
+ &B2_vec_vmrghh,
+ &B3_vec_vmrghh,
+ &B3_vec_vmrghw,
+ &B2_vec_vmrghb,
+ &B4_vec_vmrghh,
+ &B4_vec_vmrghw,
+ &B3_vec_vmrghb,
+ &B1_vec_vmrglh,
+ &B1_vec_vmrglw,
+ &B1_vec_vmrglb,
+ &B2_vec_vmrglw,
+ &B2_vec_vmrglh,
+ &B3_vec_vmrglh,
+ &B3_vec_vmrglw,
+ &B2_vec_vmrglb,
+ &B4_vec_vmrglh,
+ &B4_vec_vmrglw,
+ &B3_vec_vmrglb,
+ &B_vec_mfvscr,
+ &B1_vec_vminsh,
+ &B1_vec_vminuh,
+ &B1_vec_vminsw,
+ &B1_vec_vminuw,
+ &B1_vec_vminsb,
+ &B1_vec_vminub,
+ &B_vec_vminfp,
+ &B2_vec_vminsh,
+ &B3_vec_vminsh,
+ &B2_vec_vminsw,
+ &B3_vec_vminsw,
+ &B2_vec_vminsb,
+ &B3_vec_vminsb,
+ &B2_vec_vminuh,
+ &B3_vec_vminuh,
+ &B2_vec_vminuw,
+ &B3_vec_vminuw,
+ &B2_vec_vminub,
+ &B3_vec_vminub,
+ &B1_vec_vmladduhm,
+ &B2_vec_vmladduhm,
+ &B3_vec_vmladduhm,
+ &B4_vec_vmladduhm,
+ &B_vec_vmhraddshs,
+ &B_vec_vmsumshm,
+ &B_vec_vmsummbm,
+ &B_vec_vmsumuhm,
+ &B_vec_vmsumubm,
+ &B_vec_vmsumshs,
+ &B_vec_vmsumuhs,
+ &B1_vec_mtvscr,
+ &B2_vec_mtvscr,
+ &B3_vec_mtvscr,
+ &B4_vec_mtvscr,
+ &B5_vec_mtvscr,
+ &B6_vec_mtvscr,
+ &B7_vec_mtvscr,
+ &B8_vec_mtvscr,
+ &B9_vec_mtvscr,
+ &B10_vec_mtvscr,
+ &B_vec_vmulesh,
+ &B_vec_vmulesb,
+ &B_vec_vmuleuh,
+ &B_vec_vmuleub,
+ &B_vec_vmulosh,
+ &B_vec_vmulosb,
+ &B_vec_vmulouh,
+ &B_vec_vmuloub,
+ &B_vec_vnmsubfp,
+ &B1_vec_vnor,
+ &B2_vec_vnor,
+ &B3_vec_vnor,
+ &B4_vec_vnor,
+ &B5_vec_vnor,
+ &B6_vec_vnor,
+ &B7_vec_vnor,
+ &B8_vec_vnor,
+ &B9_vec_vnor,
+ &B10_vec_vnor,
+ &B1_vec_vor,
+ &B2_vec_vor,
+ &B3_vec_vor,
+ &B4_vec_vor,
+ &B5_vec_vor,
+ &B6_vec_vor,
+ &B7_vec_vor,
+ &B8_vec_vor,
+ &B9_vec_vor,
+ &B10_vec_vor,
+ &B11_vec_vor,
+ &B12_vec_vor,
+ &B13_vec_vor,
+ &B14_vec_vor,
+ &B15_vec_vor,
+ &B16_vec_vor,
+ &B17_vec_vor,
+ &B18_vec_vor,
+ &B19_vec_vor,
+ &B20_vec_vor,
+ &B21_vec_vor,
+ &B22_vec_vor,
+ &B23_vec_vor,
+ &B24_vec_vor,
+ &B1_vec_vpkuhum,
+ &B1_vec_vpkuwum,
+ &B2_vec_vpkuhum,
+ &B2_vec_vpkuwum,
+ &B3_vec_vpkuhum,
+ &B3_vec_vpkuwum,
+ &B_vec_vpkpx,
+ &B_vec_vpkshss,
+ &B_vec_vpkswss,
+ &B_vec_vpkuhus,
+ &B_vec_vpkuwus,
+ &B_vec_vpkshus,
+ &B_vec_vpkswus,
+ &B1_vec_vperm,
+ &B2_vec_vperm,
+ &B3_vec_vperm,
+ &B4_vec_vperm,
+ &B5_vec_vperm,
+ &B6_vec_vperm,
+ &B7_vec_vperm,
+ &B8_vec_vperm,
+ &B9_vec_vperm,
+ &B10_vec_vperm,
+ &B11_vec_vperm,
+ &B_vec_vrefp,
+ &B1_vec_vrlh,
+ &B1_vec_vrlw,
+ &B1_vec_vrlb,
+ &B2_vec_vrlh,
+ &B2_vec_vrlw,
+ &B2_vec_vrlb,
+ &B_vec_vrfin,
+ &B_vec_vrsqrtefp,
+ &B1_vec_vsel,
+ &B2_vec_vsel,
+ &B3_vec_vsel,
+ &B4_vec_vsel,
+ &B5_vec_vsel,
+ &B6_vec_vsel,
+ &B7_vec_vsel,
+ &B8_vec_vsel,
+ &B9_vec_vsel,
+ &B10_vec_vsel,
+ &B11_vec_vsel,
+ &B12_vec_vsel,
+ &B13_vec_vsel,
+ &B14_vec_vsel,
+ &B15_vec_vsel,
+ &B16_vec_vsel,
+ &B17_vec_vsel,
+ &B18_vec_vsel,
+ &B19_vec_vsel,
+ &B20_vec_vsel,
+ &B1_vec_vslh,
+ &B1_vec_vslw,
+ &B1_vec_vslb,
+ &B2_vec_vslh,
+ &B2_vec_vslw,
+ &B2_vec_vslb,
+ &B1_vec_vsldoi,
+ &B2_vec_vsldoi,
+ &B3_vec_vsldoi,
+ &B4_vec_vsldoi,
+ &B5_vec_vsldoi,
+ &B6_vec_vsldoi,
+ &B7_vec_vsldoi,
+ &B8_vec_vsldoi,
+ &B9_vec_vsldoi,
+ &B10_vec_vsldoi,
+ &B11_vec_vsldoi,
+ &B1_vec_vsl,
+ &B2_vec_vsl,
+ &B3_vec_vsl,
+ &B4_vec_vsl,
+ &B5_vec_vsl,
+ &B6_vec_vsl,
+ &B7_vec_vsl,
+ &B8_vec_vsl,
+ &B9_vec_vsl,
+ &B10_vec_vsl,
+ &B11_vec_vsl,
+ &B12_vec_vsl,
+ &B13_vec_vsl,
+ &B14_vec_vsl,
+ &B15_vec_vsl,
+ &B16_vec_vsl,
+ &B17_vec_vsl,
+ &B18_vec_vsl,
+ &B19_vec_vsl,
+ &B20_vec_vsl,
+ &B21_vec_vsl,
+ &B22_vec_vsl,
+ &B23_vec_vsl,
+ &B24_vec_vsl,
+ &B25_vec_vsl,
+ &B26_vec_vsl,
+ &B27_vec_vsl,
+ &B28_vec_vsl,
+ &B29_vec_vsl,
+ &B30_vec_vsl,
+ &B1_vec_vslo,
+ &B2_vec_vslo,
+ &B3_vec_vslo,
+ &B4_vec_vslo,
+ &B5_vec_vslo,
+ &B6_vec_vslo,
+ &B7_vec_vslo,
+ &B8_vec_vslo,
+ &B9_vec_vslo,
+ &B10_vec_vslo,
+ &B11_vec_vslo,
+ &B12_vec_vslo,
+ &B13_vec_vslo,
+ &B14_vec_vslo,
+ &B15_vec_vslo,
+ &B16_vec_vslo,
+ &B1_vec_vsplth,
+ &B1_vec_vspltw,
+ &B1_vec_vspltb,
+ &B2_vec_vspltw,
+ &B2_vec_vsplth,
+ &B3_vec_vsplth,
+ &B3_vec_vspltw,
+ &B2_vec_vspltb,
+ &B4_vec_vsplth,
+ &B4_vec_vspltw,
+ &B3_vec_vspltb,
+ &B_vec_vspltish,
+ &B_vec_vspltisw,
+ &B_vec_vspltisb,
+ &B_vec_splat_u16,
+ &B_vec_splat_u32,
+ &B_vec_splat_u8,
+ &B1_vec_vsrh,
+ &B1_vec_vsrw,
+ &B1_vec_vsrb,
+ &B2_vec_vsrh,
+ &B2_vec_vsrw,
+ &B2_vec_vsrb,
+ &B1_vec_vsrah,
+ &B1_vec_vsraw,
+ &B1_vec_vsrab,
+ &B2_vec_vsrah,
+ &B2_vec_vsraw,
+ &B2_vec_vsrab,
+ &B1_vec_vsr,
+ &B2_vec_vsr,
+ &B3_vec_vsr,
+ &B4_vec_vsr,
+ &B5_vec_vsr,
+ &B6_vec_vsr,
+ &B7_vec_vsr,
+ &B8_vec_vsr,
+ &B9_vec_vsr,
+ &B10_vec_vsr,
+ &B11_vec_vsr,
+ &B12_vec_vsr,
+ &B13_vec_vsr,
+ &B14_vec_vsr,
+ &B15_vec_vsr,
+ &B16_vec_vsr,
+ &B17_vec_vsr,
+ &B18_vec_vsr,
+ &B19_vec_vsr,
+ &B20_vec_vsr,
+ &B21_vec_vsr,
+ &B22_vec_vsr,
+ &B23_vec_vsr,
+ &B24_vec_vsr,
+ &B25_vec_vsr,
+ &B26_vec_vsr,
+ &B27_vec_vsr,
+ &B28_vec_vsr,
+ &B29_vec_vsr,
+ &B30_vec_vsr,
+ &B1_vec_vsro,
+ &B2_vec_vsro,
+ &B3_vec_vsro,
+ &B4_vec_vsro,
+ &B5_vec_vsro,
+ &B6_vec_vsro,
+ &B7_vec_vsro,
+ &B8_vec_vsro,
+ &B9_vec_vsro,
+ &B10_vec_vsro,
+ &B11_vec_vsro,
+ &B12_vec_vsro,
+ &B13_vec_vsro,
+ &B14_vec_vsro,
+ &B15_vec_vsro,
+ &B16_vec_vsro,
+ &B1_vec_stvx,
+ &B2_vec_stvx,
+ &B3_vec_stvx,
+ &B4_vec_stvx,
+ &B5_vec_stvx,
+ &B6_vec_stvx,
+ &B7_vec_stvx,
+ &B8_vec_stvx,
+ &B9_vec_stvx,
+ &B10_vec_stvx,
+ &B11_vec_stvx,
+ &B12_vec_stvx,
+ &B13_vec_stvx,
+ &B14_vec_stvx,
+ &B15_vec_stvx,
+ &B16_vec_stvx,
+ &B17_vec_stvx,
+ &B18_vec_stvx,
+ &B19_vec_stvx,
+ &B20_vec_stvx,
+ &B21_vec_stvx,
+ &B22_vec_stvx,
+ &B23_vec_stvx,
+ &B24_vec_stvx,
+ &B25_vec_stvx,
+ &B26_vec_stvx,
+ &B27_vec_stvx,
+ &B28_vec_stvx,
+ &B29_vec_stvx,
+ &B30_vec_stvx,
+ &B1_vec_stvebx,
+ &B2_vec_stvebx,
+ &B1_vec_stvewx,
+ &B2_vec_stvewx,
+ &B3_vec_stvewx,
+ &B4_vec_stvewx,
+ &B3_vec_stvebx,
+ &B4_vec_stvebx,
+ &B5_vec_stvewx,
+ &B1_vec_stvehx,
+ &B2_vec_stvehx,
+ &B3_vec_stvehx,
+ &B6_vec_stvewx,
+ &B7_vec_stvewx,
+ &B5_vec_stvebx,
+ &B4_vec_stvehx,
+ &B8_vec_stvewx,
+ &B9_vec_stvewx,
+ &B6_vec_stvebx,
+ &B1_vec_stvxl,
+ &B2_vec_stvxl,
+ &B3_vec_stvxl,
+ &B4_vec_stvxl,
+ &B5_vec_stvxl,
+ &B6_vec_stvxl,
+ &B7_vec_stvxl,
+ &B8_vec_stvxl,
+ &B9_vec_stvxl,
+ &B10_vec_stvxl,
+ &B11_vec_stvxl,
+ &B12_vec_stvxl,
+ &B13_vec_stvxl,
+ &B14_vec_stvxl,
+ &B15_vec_stvxl,
+ &B16_vec_stvxl,
+ &B17_vec_stvxl,
+ &B18_vec_stvxl,
+ &B19_vec_stvxl,
+ &B20_vec_stvxl,
+ &B21_vec_stvxl,
+ &B22_vec_stvxl,
+ &B23_vec_stvxl,
+ &B24_vec_stvxl,
+ &B25_vec_stvxl,
+ &B26_vec_stvxl,
+ &B27_vec_stvxl,
+ &B28_vec_stvxl,
+ &B29_vec_stvxl,
+ &B30_vec_stvxl,
+ &B1_vec_vsubuhm,
+ &B2_vec_vsubuhm,
+ &B1_vec_vsubuwm,
+ &B2_vec_vsubuwm,
+ &B1_vec_vsububm,
+ &B2_vec_vsububm,
+ &B_vec_vsubfp,
+ &B3_vec_vsubuhm,
+ &B4_vec_vsubuhm,
+ &B3_vec_vsubuwm,
+ &B4_vec_vsubuwm,
+ &B3_vec_vsububm,
+ &B4_vec_vsububm,
+ &B5_vec_vsubuhm,
+ &B6_vec_vsubuhm,
+ &B5_vec_vsubuwm,
+ &B6_vec_vsubuwm,
+ &B5_vec_vsububm,
+ &B6_vec_vsububm,
+ &B_vec_vsubcuw,
+ &B1_vec_vsubshs,
+ &B1_vec_vsubuhs,
+ &B1_vec_vsubsws,
+ &B1_vec_vsubuws,
+ &B1_vec_vsubsbs,
+ &B1_vec_vsububs,
+ &B2_vec_vsubshs,
+ &B3_vec_vsubshs,
+ &B2_vec_vsubsws,
+ &B3_vec_vsubsws,
+ &B2_vec_vsubsbs,
+ &B3_vec_vsubsbs,
+ &B2_vec_vsubuhs,
+ &B3_vec_vsubuhs,
+ &B2_vec_vsubuws,
+ &B3_vec_vsubuws,
+ &B2_vec_vsububs,
+ &B3_vec_vsububs,
+ &B_vec_vsum2sws,
+ &B_vec_vsum4shs,
+ &B_vec_vsum4sbs,
+ &B_vec_vsum4ubs,
+ &B_vec_vsumsws,
+ &B_vec_vrfiz,
+ &B1_vec_unpack2sh,
+ &B2_vec_unpack2sh,
+ &B1_vec_unpack2sl,
+ &B2_vec_unpack2sl,
+ &B1_vec_unpack2uh,
+ &B2_vec_unpack2uh,
+ &B1_vec_unpack2ul,
+ &B2_vec_unpack2ul,
+ &B1_vec_vupkhsh,
+ &B1_vec_vupkhsb,
+ &B_vec_vupkhpx,
+ &B2_vec_vupkhsh,
+ &B2_vec_vupkhsb,
+ &B1_vec_vupklsh,
+ &B1_vec_vupklsb,
+ &B_vec_vupklpx,
+ &B2_vec_vupklsh,
+ &B2_vec_vupklsb,
+ &B1_vec_vxor,
+ &B2_vec_vxor,
+ &B3_vec_vxor,
+ &B4_vec_vxor,
+ &B5_vec_vxor,
+ &B6_vec_vxor,
+ &B7_vec_vxor,
+ &B8_vec_vxor,
+ &B9_vec_vxor,
+ &B10_vec_vxor,
+ &B11_vec_vxor,
+ &B12_vec_vxor,
+ &B13_vec_vxor,
+ &B14_vec_vxor,
+ &B15_vec_vxor,
+ &B16_vec_vxor,
+ &B17_vec_vxor,
+ &B18_vec_vxor,
+ &B19_vec_vxor,
+ &B20_vec_vxor,
+ &B21_vec_vxor,
+ &B22_vec_vxor,
+ &B23_vec_vxor,
+ &B24_vec_vxor,
+};
+
+static const struct builtin *const O_vec_abs[4] = {
+ &B1_vec_abs,
+ &B2_vec_abs,
+ &B3_vec_abs,
+ &B4_vec_abs,
+};
+static const struct builtin *const O_vec_abss[3] = {
+ &B1_vec_abss,
+ &B2_vec_abss,
+ &B3_vec_abss,
+};
+static const struct builtin *const O_vec_add[19] = {
+ &B1_vec_vadduhm,
+ &B2_vec_vadduhm,
+ &B1_vec_vadduwm,
+ &B2_vec_vadduwm,
+ &B1_vec_vaddubm,
+ &B2_vec_vaddubm,
+ &B_vec_vaddfp,
+ &B3_vec_vadduhm,
+ &B4_vec_vadduhm,
+ &B3_vec_vadduwm,
+ &B4_vec_vadduwm,
+ &B3_vec_vaddubm,
+ &B4_vec_vaddubm,
+ &B5_vec_vadduhm,
+ &B6_vec_vadduhm,
+ &B5_vec_vadduwm,
+ &B6_vec_vadduwm,
+ &B5_vec_vaddubm,
+ &B6_vec_vaddubm,
+};
+static const struct builtin *const O_vec_addc[1] = {
+ &B_vec_vaddcuw,
+};
+static const struct builtin *const O_vec_adds[18] = {
+ &B1_vec_vaddshs,
+ &B1_vec_vadduhs,
+ &B1_vec_vaddsws,
+ &B1_vec_vadduws,
+ &B1_vec_vaddsbs,
+ &B1_vec_vaddubs,
+ &B2_vec_vaddshs,
+ &B3_vec_vaddshs,
+ &B2_vec_vaddsws,
+ &B3_vec_vaddsws,
+ &B2_vec_vaddsbs,
+ &B3_vec_vaddsbs,
+ &B2_vec_vadduhs,
+ &B3_vec_vadduhs,
+ &B2_vec_vadduws,
+ &B3_vec_vadduws,
+ &B2_vec_vaddubs,
+ &B3_vec_vaddubs,
+};
+static const struct builtin *const O_vec_all_eq[23] = {
+ &B1_vec_all_eq,
+ &B2_vec_all_eq,
+ &B3_vec_all_eq,
+ &B4_vec_all_eq,
+ &B5_vec_all_eq,
+ &B6_vec_all_eq,
+ &B7_vec_all_eq,
+ &B8_vec_all_eq,
+ &B9_vec_all_eq,
+ &B10_vec_all_eq,
+ &B11_vec_all_eq,
+ &B12_vec_all_eq,
+ &B13_vec_all_eq,
+ &B14_vec_all_eq,
+ &B15_vec_all_eq,
+ &B16_vec_all_eq,
+ &B17_vec_all_eq,
+ &B18_vec_all_eq,
+ &B19_vec_all_eq,
+ &B20_vec_all_eq,
+ &B21_vec_all_eq,
+ &B22_vec_all_eq,
+ &B23_vec_all_eq,
+};
+static const struct builtin *const O_vec_all_ge[19] = {
+ &B1_vec_all_ge,
+ &B2_vec_all_ge,
+ &B3_vec_all_ge,
+ &B4_vec_all_ge,
+ &B5_vec_all_ge,
+ &B6_vec_all_ge,
+ &B7_vec_all_ge,
+ &B8_vec_all_ge,
+ &B9_vec_all_ge,
+ &B10_vec_all_ge,
+ &B11_vec_all_ge,
+ &B12_vec_all_ge,
+ &B13_vec_all_ge,
+ &B14_vec_all_ge,
+ &B15_vec_all_ge,
+ &B16_vec_all_ge,
+ &B17_vec_all_ge,
+ &B18_vec_all_ge,
+ &B19_vec_all_ge,
+};
+static const struct builtin *const O_vec_all_gt[19] = {
+ &B1_vec_all_gt,
+ &B2_vec_all_gt,
+ &B3_vec_all_gt,
+ &B4_vec_all_gt,
+ &B5_vec_all_gt,
+ &B6_vec_all_gt,
+ &B7_vec_all_gt,
+ &B8_vec_all_gt,
+ &B9_vec_all_gt,
+ &B10_vec_all_gt,
+ &B11_vec_all_gt,
+ &B12_vec_all_gt,
+ &B13_vec_all_gt,
+ &B14_vec_all_gt,
+ &B15_vec_all_gt,
+ &B16_vec_all_gt,
+ &B17_vec_all_gt,
+ &B18_vec_all_gt,
+ &B19_vec_all_gt,
+};
+static const struct builtin *const O_vec_all_in[1] = {
+ &B_vec_all_in,
+};
+static const struct builtin *const O_vec_all_le[19] = {
+ &B1_vec_all_le,
+ &B2_vec_all_le,
+ &B3_vec_all_le,
+ &B4_vec_all_le,
+ &B5_vec_all_le,
+ &B6_vec_all_le,
+ &B7_vec_all_le,
+ &B8_vec_all_le,
+ &B9_vec_all_le,
+ &B10_vec_all_le,
+ &B11_vec_all_le,
+ &B12_vec_all_le,
+ &B13_vec_all_le,
+ &B14_vec_all_le,
+ &B15_vec_all_le,
+ &B16_vec_all_le,
+ &B17_vec_all_le,
+ &B18_vec_all_le,
+ &B19_vec_all_le,
+};
+static const struct builtin *const O_vec_all_lt[19] = {
+ &B1_vec_all_lt,
+ &B2_vec_all_lt,
+ &B3_vec_all_lt,
+ &B4_vec_all_lt,
+ &B5_vec_all_lt,
+ &B6_vec_all_lt,
+ &B7_vec_all_lt,
+ &B8_vec_all_lt,
+ &B9_vec_all_lt,
+ &B10_vec_all_lt,
+ &B11_vec_all_lt,
+ &B12_vec_all_lt,
+ &B13_vec_all_lt,
+ &B14_vec_all_lt,
+ &B15_vec_all_lt,
+ &B16_vec_all_lt,
+ &B17_vec_all_lt,
+ &B18_vec_all_lt,
+ &B19_vec_all_lt,
+};
+static const struct builtin *const O_vec_all_nan[1] = {
+ &B_vec_all_nan,
+};
+static const struct builtin *const O_vec_all_ne[23] = {
+ &B1_vec_all_ne,
+ &B2_vec_all_ne,
+ &B3_vec_all_ne,
+ &B4_vec_all_ne,
+ &B5_vec_all_ne,
+ &B6_vec_all_ne,
+ &B7_vec_all_ne,
+ &B8_vec_all_ne,
+ &B9_vec_all_ne,
+ &B10_vec_all_ne,
+ &B11_vec_all_ne,
+ &B12_vec_all_ne,
+ &B13_vec_all_ne,
+ &B14_vec_all_ne,
+ &B15_vec_all_ne,
+ &B16_vec_all_ne,
+ &B17_vec_all_ne,
+ &B18_vec_all_ne,
+ &B19_vec_all_ne,
+ &B20_vec_all_ne,
+ &B21_vec_all_ne,
+ &B22_vec_all_ne,
+ &B23_vec_all_ne,
+};
+static const struct builtin *const O_vec_all_nge[1] = {
+ &B_vec_all_nge,
+};
+static const struct builtin *const O_vec_all_ngt[1] = {
+ &B_vec_all_ngt,
+};
+static const struct builtin *const O_vec_all_nle[1] = {
+ &B_vec_all_nle,
+};
+static const struct builtin *const O_vec_all_nlt[1] = {
+ &B_vec_all_nlt,
+};
+static const struct builtin *const O_vec_all_numeric[1] = {
+ &B_vec_all_numeric,
+};
+static const struct builtin *const O_vec_and[24] = {
+ &B1_vec_vand,
+ &B2_vec_vand,
+ &B3_vec_vand,
+ &B4_vec_vand,
+ &B5_vec_vand,
+ &B6_vec_vand,
+ &B7_vec_vand,
+ &B8_vec_vand,
+ &B9_vec_vand,
+ &B10_vec_vand,
+ &B11_vec_vand,
+ &B12_vec_vand,
+ &B13_vec_vand,
+ &B14_vec_vand,
+ &B15_vec_vand,
+ &B16_vec_vand,
+ &B17_vec_vand,
+ &B18_vec_vand,
+ &B19_vec_vand,
+ &B20_vec_vand,
+ &B21_vec_vand,
+ &B22_vec_vand,
+ &B23_vec_vand,
+ &B24_vec_vand,
+};
+static const struct builtin *const O_vec_andc[24] = {
+ &B1_vec_vandc,
+ &B2_vec_vandc,
+ &B3_vec_vandc,
+ &B4_vec_vandc,
+ &B5_vec_vandc,
+ &B6_vec_vandc,
+ &B7_vec_vandc,
+ &B8_vec_vandc,
+ &B9_vec_vandc,
+ &B10_vec_vandc,
+ &B11_vec_vandc,
+ &B12_vec_vandc,
+ &B13_vec_vandc,
+ &B14_vec_vandc,
+ &B15_vec_vandc,
+ &B16_vec_vandc,
+ &B17_vec_vandc,
+ &B18_vec_vandc,
+ &B19_vec_vandc,
+ &B20_vec_vandc,
+ &B21_vec_vandc,
+ &B22_vec_vandc,
+ &B23_vec_vandc,
+ &B24_vec_vandc,
+};
+static const struct builtin *const O_vec_any_eq[23] = {
+ &B1_vec_any_eq,
+ &B2_vec_any_eq,
+ &B3_vec_any_eq,
+ &B4_vec_any_eq,
+ &B5_vec_any_eq,
+ &B6_vec_any_eq,
+ &B7_vec_any_eq,
+ &B8_vec_any_eq,
+ &B9_vec_any_eq,
+ &B10_vec_any_eq,
+ &B11_vec_any_eq,
+ &B12_vec_any_eq,
+ &B13_vec_any_eq,
+ &B14_vec_any_eq,
+ &B15_vec_any_eq,
+ &B16_vec_any_eq,
+ &B17_vec_any_eq,
+ &B18_vec_any_eq,
+ &B19_vec_any_eq,
+ &B20_vec_any_eq,
+ &B21_vec_any_eq,
+ &B22_vec_any_eq,
+ &B23_vec_any_eq,
+};
+static const struct builtin *const O_vec_any_ge[19] = {
+ &B1_vec_any_ge,
+ &B2_vec_any_ge,
+ &B3_vec_any_ge,
+ &B4_vec_any_ge,
+ &B5_vec_any_ge,
+ &B6_vec_any_ge,
+ &B7_vec_any_ge,
+ &B8_vec_any_ge,
+ &B9_vec_any_ge,
+ &B10_vec_any_ge,
+ &B11_vec_any_ge,
+ &B12_vec_any_ge,
+ &B13_vec_any_ge,
+ &B14_vec_any_ge,
+ &B15_vec_any_ge,
+ &B16_vec_any_ge,
+ &B17_vec_any_ge,
+ &B18_vec_any_ge,
+ &B19_vec_any_ge,
+};
+static const struct builtin *const O_vec_any_gt[19] = {
+ &B1_vec_any_gt,
+ &B2_vec_any_gt,
+ &B3_vec_any_gt,
+ &B4_vec_any_gt,
+ &B5_vec_any_gt,
+ &B6_vec_any_gt,
+ &B7_vec_any_gt,
+ &B8_vec_any_gt,
+ &B9_vec_any_gt,
+ &B10_vec_any_gt,
+ &B11_vec_any_gt,
+ &B12_vec_any_gt,
+ &B13_vec_any_gt,
+ &B14_vec_any_gt,
+ &B15_vec_any_gt,
+ &B16_vec_any_gt,
+ &B17_vec_any_gt,
+ &B18_vec_any_gt,
+ &B19_vec_any_gt,
+};
+static const struct builtin *const O_vec_any_le[19] = {
+ &B1_vec_any_le,
+ &B2_vec_any_le,
+ &B3_vec_any_le,
+ &B4_vec_any_le,
+ &B5_vec_any_le,
+ &B6_vec_any_le,
+ &B7_vec_any_le,
+ &B8_vec_any_le,
+ &B9_vec_any_le,
+ &B10_vec_any_le,
+ &B11_vec_any_le,
+ &B12_vec_any_le,
+ &B13_vec_any_le,
+ &B14_vec_any_le,
+ &B15_vec_any_le,
+ &B16_vec_any_le,
+ &B17_vec_any_le,
+ &B18_vec_any_le,
+ &B19_vec_any_le,
+};
+static const struct builtin *const O_vec_any_lt[19] = {
+ &B1_vec_any_lt,
+ &B2_vec_any_lt,
+ &B3_vec_any_lt,
+ &B4_vec_any_lt,
+ &B5_vec_any_lt,
+ &B6_vec_any_lt,
+ &B7_vec_any_lt,
+ &B8_vec_any_lt,
+ &B9_vec_any_lt,
+ &B10_vec_any_lt,
+ &B11_vec_any_lt,
+ &B12_vec_any_lt,
+ &B13_vec_any_lt,
+ &B14_vec_any_lt,
+ &B15_vec_any_lt,
+ &B16_vec_any_lt,
+ &B17_vec_any_lt,
+ &B18_vec_any_lt,
+ &B19_vec_any_lt,
+};
+static const struct builtin *const O_vec_any_nan[1] = {
+ &B_vec_any_nan,
+};
+static const struct builtin *const O_vec_any_ne[23] = {
+ &B1_vec_any_ne,
+ &B2_vec_any_ne,
+ &B3_vec_any_ne,
+ &B4_vec_any_ne,
+ &B5_vec_any_ne,
+ &B6_vec_any_ne,
+ &B7_vec_any_ne,
+ &B8_vec_any_ne,
+ &B9_vec_any_ne,
+ &B10_vec_any_ne,
+ &B11_vec_any_ne,
+ &B12_vec_any_ne,
+ &B13_vec_any_ne,
+ &B14_vec_any_ne,
+ &B15_vec_any_ne,
+ &B16_vec_any_ne,
+ &B17_vec_any_ne,
+ &B18_vec_any_ne,
+ &B19_vec_any_ne,
+ &B20_vec_any_ne,
+ &B21_vec_any_ne,
+ &B22_vec_any_ne,
+ &B23_vec_any_ne,
+};
+static const struct builtin *const O_vec_any_nge[1] = {
+ &B_vec_any_nge,
+};
+static const struct builtin *const O_vec_any_ngt[1] = {
+ &B_vec_any_ngt,
+};
+static const struct builtin *const O_vec_any_nle[1] = {
+ &B_vec_any_nle,
+};
+static const struct builtin *const O_vec_any_nlt[1] = {
+ &B_vec_any_nlt,
+};
+static const struct builtin *const O_vec_any_numeric[1] = {
+ &B_vec_any_numeric,
+};
+static const struct builtin *const O_vec_any_out[1] = {
+ &B_vec_any_out,
+};
+static const struct builtin *const O_vec_avg[6] = {
+ &B_vec_vavgsh,
+ &B_vec_vavgsw,
+ &B_vec_vavgsb,
+ &B_vec_vavguh,
+ &B_vec_vavguw,
+ &B_vec_vavgub,
+};
+static const struct builtin *const O_vec_ceil[1] = {
+ &B_vec_vrfip,
+};
+static const struct builtin *const O_vec_cmpb[1] = {
+ &B_vec_vcmpbfp,
+};
+static const struct builtin *const O_vec_cmpeq[7] = {
+ &B_vec_vcmpeqfp,
+ &B1_vec_vcmpequh,
+ &B1_vec_vcmpequw,
+ &B1_vec_vcmpequb,
+ &B2_vec_vcmpequh,
+ &B2_vec_vcmpequw,
+ &B2_vec_vcmpequb,
+};
+static const struct builtin *const O_vec_cmpge[1] = {
+ &B_vec_vcmpgefp,
+};
+static const struct builtin *const O_vec_cmpgt[7] = {
+ &B_vec_vcmpgtfp,
+ &B_vec_vcmpgtsh,
+ &B_vec_vcmpgtsw,
+ &B_vec_vcmpgtsb,
+ &B_vec_vcmpgtuh,
+ &B_vec_vcmpgtuw,
+ &B_vec_vcmpgtub,
+};
+static const struct builtin *const O_vec_cmple[1] = {
+ &B_vec_cmple,
+};
+static const struct builtin *const O_vec_cmplt[7] = {
+ &B1_vec_cmplt,
+ &B2_vec_cmplt,
+ &B3_vec_cmplt,
+ &B4_vec_cmplt,
+ &B5_vec_cmplt,
+ &B6_vec_cmplt,
+ &B7_vec_cmplt,
+};
+static const struct builtin *const O_vec_ctf[2] = {
+ &B_vec_vcfsx,
+ &B_vec_vcfux,
+};
+static const struct builtin *const O_vec_cts[1] = {
+ &B_vec_vctsxs,
+};
+static const struct builtin *const O_vec_ctu[1] = {
+ &B_vec_vctuxs,
+};
+static const struct builtin *const O_vec_dss[1] = {
+ &B_vec_dss,
+};
+static const struct builtin *const O_vec_dssall[1] = {
+ &B_vec_dssall,
+};
+static const struct builtin *const O_vec_dst[20] = {
+ &B1_vec_dst,
+ &B2_vec_dst,
+ &B3_vec_dst,
+ &B4_vec_dst,
+ &B5_vec_dst,
+ &B6_vec_dst,
+ &B7_vec_dst,
+ &B8_vec_dst,
+ &B9_vec_dst,
+ &B10_vec_dst,
+ &B11_vec_dst,
+ &B12_vec_dst,
+ &B13_vec_dst,
+ &B14_vec_dst,
+ &B15_vec_dst,
+ &B16_vec_dst,
+ &B17_vec_dst,
+ &B18_vec_dst,
+ &B19_vec_dst,
+ &B20_vec_dst,
+};
+static const struct builtin *const O_vec_dstst[20] = {
+ &B1_vec_dstst,
+ &B2_vec_dstst,
+ &B3_vec_dstst,
+ &B4_vec_dstst,
+ &B5_vec_dstst,
+ &B6_vec_dstst,
+ &B7_vec_dstst,
+ &B8_vec_dstst,
+ &B9_vec_dstst,
+ &B10_vec_dstst,
+ &B11_vec_dstst,
+ &B12_vec_dstst,
+ &B13_vec_dstst,
+ &B14_vec_dstst,
+ &B15_vec_dstst,
+ &B16_vec_dstst,
+ &B17_vec_dstst,
+ &B18_vec_dstst,
+ &B19_vec_dstst,
+ &B20_vec_dstst,
+};
+static const struct builtin *const O_vec_dststt[20] = {
+ &B1_vec_dststt,
+ &B2_vec_dststt,
+ &B3_vec_dststt,
+ &B4_vec_dststt,
+ &B5_vec_dststt,
+ &B6_vec_dststt,
+ &B7_vec_dststt,
+ &B8_vec_dststt,
+ &B9_vec_dststt,
+ &B10_vec_dststt,
+ &B11_vec_dststt,
+ &B12_vec_dststt,
+ &B13_vec_dststt,
+ &B14_vec_dststt,
+ &B15_vec_dststt,
+ &B16_vec_dststt,
+ &B17_vec_dststt,
+ &B18_vec_dststt,
+ &B19_vec_dststt,
+ &B20_vec_dststt,
+};
+static const struct builtin *const O_vec_dstt[20] = {
+ &B1_vec_dstt,
+ &B2_vec_dstt,
+ &B3_vec_dstt,
+ &B4_vec_dstt,
+ &B5_vec_dstt,
+ &B6_vec_dstt,
+ &B7_vec_dstt,
+ &B8_vec_dstt,
+ &B9_vec_dstt,
+ &B10_vec_dstt,
+ &B11_vec_dstt,
+ &B12_vec_dstt,
+ &B13_vec_dstt,
+ &B14_vec_dstt,
+ &B15_vec_dstt,
+ &B16_vec_dstt,
+ &B17_vec_dstt,
+ &B18_vec_dstt,
+ &B19_vec_dstt,
+ &B20_vec_dstt,
+};
+static const struct builtin *const O_vec_expte[1] = {
+ &B_vec_vexptefp,
+};
+static const struct builtin *const O_vec_floor[1] = {
+ &B_vec_vrfim,
+};
+static const struct builtin *const O_vec_ld[20] = {
+ &B1_vec_lvx,
+ &B2_vec_lvx,
+ &B3_vec_lvx,
+ &B4_vec_lvx,
+ &B5_vec_lvx,
+ &B6_vec_lvx,
+ &B7_vec_lvx,
+ &B8_vec_lvx,
+ &B9_vec_lvx,
+ &B10_vec_lvx,
+ &B11_vec_lvx,
+ &B12_vec_lvx,
+ &B13_vec_lvx,
+ &B14_vec_lvx,
+ &B15_vec_lvx,
+ &B16_vec_lvx,
+ &B17_vec_lvx,
+ &B18_vec_lvx,
+ &B19_vec_lvx,
+ &B20_vec_lvx,
+};
+static const struct builtin *const O_vec_lde[9] = {
+ &B1_vec_lvewx,
+ &B2_vec_lvewx,
+ &B3_vec_lvewx,
+ &B1_vec_lvehx,
+ &B1_vec_lvebx,
+ &B2_vec_lvebx,
+ &B4_vec_lvewx,
+ &B5_vec_lvewx,
+ &B2_vec_lvehx,
+};
+static const struct builtin *const O_vec_ldl[20] = {
+ &B1_vec_lvxl,
+ &B2_vec_lvxl,
+ &B3_vec_lvxl,
+ &B4_vec_lvxl,
+ &B5_vec_lvxl,
+ &B6_vec_lvxl,
+ &B7_vec_lvxl,
+ &B8_vec_lvxl,
+ &B9_vec_lvxl,
+ &B10_vec_lvxl,
+ &B11_vec_lvxl,
+ &B12_vec_lvxl,
+ &B13_vec_lvxl,
+ &B14_vec_lvxl,
+ &B15_vec_lvxl,
+ &B16_vec_lvxl,
+ &B17_vec_lvxl,
+ &B18_vec_lvxl,
+ &B19_vec_lvxl,
+ &B20_vec_lvxl,
+};
+static const struct builtin *const O_vec_loge[1] = {
+ &B_vec_vlogefp,
+};
+static const struct builtin *const O_vec_lvebx[2] = {
+ &B1_vec_lvebx,
+ &B2_vec_lvebx,
+};
+static const struct builtin *const O_vec_lvehx[2] = {
+ &B1_vec_lvehx,
+ &B2_vec_lvehx,
+};
+static const struct builtin *const O_vec_lvewx[5] = {
+ &B1_vec_lvewx,
+ &B2_vec_lvewx,
+ &B3_vec_lvewx,
+ &B4_vec_lvewx,
+ &B5_vec_lvewx,
+};
+static const struct builtin *const O_vec_lvsl[9] = {
+ &B1_vec_lvsl,
+ &B2_vec_lvsl,
+ &B3_vec_lvsl,
+ &B4_vec_lvsl,
+ &B5_vec_lvsl,
+ &B6_vec_lvsl,
+ &B7_vec_lvsl,
+ &B8_vec_lvsl,
+ &B9_vec_lvsl,
+};
+static const struct builtin *const O_vec_lvsr[9] = {
+ &B1_vec_lvsr,
+ &B2_vec_lvsr,
+ &B3_vec_lvsr,
+ &B4_vec_lvsr,
+ &B5_vec_lvsr,
+ &B6_vec_lvsr,
+ &B7_vec_lvsr,
+ &B8_vec_lvsr,
+ &B9_vec_lvsr,
+};
+static const struct builtin *const O_vec_lvx[20] = {
+ &B1_vec_lvx,
+ &B2_vec_lvx,
+ &B3_vec_lvx,
+ &B4_vec_lvx,
+ &B5_vec_lvx,
+ &B6_vec_lvx,
+ &B7_vec_lvx,
+ &B8_vec_lvx,
+ &B9_vec_lvx,
+ &B10_vec_lvx,
+ &B11_vec_lvx,
+ &B12_vec_lvx,
+ &B13_vec_lvx,
+ &B14_vec_lvx,
+ &B15_vec_lvx,
+ &B16_vec_lvx,
+ &B17_vec_lvx,
+ &B18_vec_lvx,
+ &B19_vec_lvx,
+ &B20_vec_lvx,
+};
+static const struct builtin *const O_vec_lvxl[20] = {
+ &B1_vec_lvxl,
+ &B2_vec_lvxl,
+ &B3_vec_lvxl,
+ &B4_vec_lvxl,
+ &B5_vec_lvxl,
+ &B6_vec_lvxl,
+ &B7_vec_lvxl,
+ &B8_vec_lvxl,
+ &B9_vec_lvxl,
+ &B10_vec_lvxl,
+ &B11_vec_lvxl,
+ &B12_vec_lvxl,
+ &B13_vec_lvxl,
+ &B14_vec_lvxl,
+ &B15_vec_lvxl,
+ &B16_vec_lvxl,
+ &B17_vec_lvxl,
+ &B18_vec_lvxl,
+ &B19_vec_lvxl,
+ &B20_vec_lvxl,
+};
+static const struct builtin *const O_vec_madd[1] = {
+ &B_vec_vmaddfp,
+};
+static const struct builtin *const O_vec_madds[1] = {
+ &B_vec_vmhaddshs,
+};
+static const struct builtin *const O_vec_max[19] = {
+ &B1_vec_vmaxsh,
+ &B1_vec_vmaxuh,
+ &B1_vec_vmaxsw,
+ &B1_vec_vmaxuw,
+ &B1_vec_vmaxsb,
+ &B1_vec_vmaxub,
+ &B_vec_vmaxfp,
+ &B2_vec_vmaxsh,
+ &B3_vec_vmaxsh,
+ &B2_vec_vmaxsw,
+ &B3_vec_vmaxsw,
+ &B2_vec_vmaxsb,
+ &B3_vec_vmaxsb,
+ &B2_vec_vmaxuh,
+ &B3_vec_vmaxuh,
+ &B2_vec_vmaxuw,
+ &B3_vec_vmaxuw,
+ &B2_vec_vmaxub,
+ &B3_vec_vmaxub,
+};
+static const struct builtin *const O_vec_mergeh[11] = {
+ &B1_vec_vmrghh,
+ &B1_vec_vmrghw,
+ &B1_vec_vmrghb,
+ &B2_vec_vmrghw,
+ &B2_vec_vmrghh,
+ &B3_vec_vmrghh,
+ &B3_vec_vmrghw,
+ &B2_vec_vmrghb,
+ &B4_vec_vmrghh,
+ &B4_vec_vmrghw,
+ &B3_vec_vmrghb,
+};
+static const struct builtin *const O_vec_mergel[11] = {
+ &B1_vec_vmrglh,
+ &B1_vec_vmrglw,
+ &B1_vec_vmrglb,
+ &B2_vec_vmrglw,
+ &B2_vec_vmrglh,
+ &B3_vec_vmrglh,
+ &B3_vec_vmrglw,
+ &B2_vec_vmrglb,
+ &B4_vec_vmrglh,
+ &B4_vec_vmrglw,
+ &B3_vec_vmrglb,
+};
+static const struct builtin *const O_vec_mfvscr[1] = {
+ &B_vec_mfvscr,
+};
+static const struct builtin *const O_vec_min[19] = {
+ &B1_vec_vminsh,
+ &B1_vec_vminuh,
+ &B1_vec_vminsw,
+ &B1_vec_vminuw,
+ &B1_vec_vminsb,
+ &B1_vec_vminub,
+ &B_vec_vminfp,
+ &B2_vec_vminsh,
+ &B3_vec_vminsh,
+ &B2_vec_vminsw,
+ &B3_vec_vminsw,
+ &B2_vec_vminsb,
+ &B3_vec_vminsb,
+ &B2_vec_vminuh,
+ &B3_vec_vminuh,
+ &B2_vec_vminuw,
+ &B3_vec_vminuw,
+ &B2_vec_vminub,
+ &B3_vec_vminub,
+};
+static const struct builtin *const O_vec_mladd[4] = {
+ &B1_vec_vmladduhm,
+ &B2_vec_vmladduhm,
+ &B3_vec_vmladduhm,
+ &B4_vec_vmladduhm,
+};
+static const struct builtin *const O_vec_mradds[1] = {
+ &B_vec_vmhraddshs,
+};
+static const struct builtin *const O_vec_msum[4] = {
+ &B_vec_vmsumshm,
+ &B_vec_vmsummbm,
+ &B_vec_vmsumuhm,
+ &B_vec_vmsumubm,
+};
+static const struct builtin *const O_vec_msums[2] = {
+ &B_vec_vmsumshs,
+ &B_vec_vmsumuhs,
+};
+static const struct builtin *const O_vec_mtvscr[10] = {
+ &B1_vec_mtvscr,
+ &B2_vec_mtvscr,
+ &B3_vec_mtvscr,
+ &B4_vec_mtvscr,
+ &B5_vec_mtvscr,
+ &B6_vec_mtvscr,
+ &B7_vec_mtvscr,
+ &B8_vec_mtvscr,
+ &B9_vec_mtvscr,
+ &B10_vec_mtvscr,
+};
+static const struct builtin *const O_vec_mule[4] = {
+ &B_vec_vmulesh,
+ &B_vec_vmulesb,
+ &B_vec_vmuleuh,
+ &B_vec_vmuleub,
+};
+static const struct builtin *const O_vec_mulo[4] = {
+ &B_vec_vmulosh,
+ &B_vec_vmulosb,
+ &B_vec_vmulouh,
+ &B_vec_vmuloub,
+};
+static const struct builtin *const O_vec_nmsub[1] = {
+ &B_vec_vnmsubfp,
+};
+static const struct builtin *const O_vec_nor[10] = {
+ &B1_vec_vnor,
+ &B2_vec_vnor,
+ &B3_vec_vnor,
+ &B4_vec_vnor,
+ &B5_vec_vnor,
+ &B6_vec_vnor,
+ &B7_vec_vnor,
+ &B8_vec_vnor,
+ &B9_vec_vnor,
+ &B10_vec_vnor,
+};
+static const struct builtin *const O_vec_or[24] = {
+ &B1_vec_vor,
+ &B2_vec_vor,
+ &B3_vec_vor,
+ &B4_vec_vor,
+ &B5_vec_vor,
+ &B6_vec_vor,
+ &B7_vec_vor,
+ &B8_vec_vor,
+ &B9_vec_vor,
+ &B10_vec_vor,
+ &B11_vec_vor,
+ &B12_vec_vor,
+ &B13_vec_vor,
+ &B14_vec_vor,
+ &B15_vec_vor,
+ &B16_vec_vor,
+ &B17_vec_vor,
+ &B18_vec_vor,
+ &B19_vec_vor,
+ &B20_vec_vor,
+ &B21_vec_vor,
+ &B22_vec_vor,
+ &B23_vec_vor,
+ &B24_vec_vor,
+};
+static const struct builtin *const O_vec_pack[6] = {
+ &B1_vec_vpkuhum,
+ &B1_vec_vpkuwum,
+ &B2_vec_vpkuhum,
+ &B2_vec_vpkuwum,
+ &B3_vec_vpkuhum,
+ &B3_vec_vpkuwum,
+};
+static const struct builtin *const O_vec_packpx[1] = {
+ &B_vec_vpkpx,
+};
+static const struct builtin *const O_vec_packs[4] = {
+ &B_vec_vpkshss,
+ &B_vec_vpkswss,
+ &B_vec_vpkuhus,
+ &B_vec_vpkuwus,
+};
+static const struct builtin *const O_vec_packsu[4] = {
+ &B_vec_vpkshus,
+ &B_vec_vpkswus,
+ &B_vec_vpkuhus,
+ &B_vec_vpkuwus,
+};
+static const struct builtin *const O_vec_perm[11] = {
+ &B1_vec_vperm,
+ &B2_vec_vperm,
+ &B3_vec_vperm,
+ &B4_vec_vperm,
+ &B5_vec_vperm,
+ &B6_vec_vperm,
+ &B7_vec_vperm,
+ &B8_vec_vperm,
+ &B9_vec_vperm,
+ &B10_vec_vperm,
+ &B11_vec_vperm,
+};
+static const struct builtin *const O_vec_re[1] = {
+ &B_vec_vrefp,
+};
+static const struct builtin *const O_vec_rl[6] = {
+ &B1_vec_vrlh,
+ &B1_vec_vrlw,
+ &B1_vec_vrlb,
+ &B2_vec_vrlh,
+ &B2_vec_vrlw,
+ &B2_vec_vrlb,
+};
+static const struct builtin *const O_vec_round[1] = {
+ &B_vec_vrfin,
+};
+static const struct builtin *const O_vec_rsqrte[1] = {
+ &B_vec_vrsqrtefp,
+};
+static const struct builtin *const O_vec_sel[20] = {
+ &B1_vec_vsel,
+ &B2_vec_vsel,
+ &B3_vec_vsel,
+ &B4_vec_vsel,
+ &B5_vec_vsel,
+ &B6_vec_vsel,
+ &B7_vec_vsel,
+ &B8_vec_vsel,
+ &B9_vec_vsel,
+ &B10_vec_vsel,
+ &B11_vec_vsel,
+ &B12_vec_vsel,
+ &B13_vec_vsel,
+ &B14_vec_vsel,
+ &B15_vec_vsel,
+ &B16_vec_vsel,
+ &B17_vec_vsel,
+ &B18_vec_vsel,
+ &B19_vec_vsel,
+ &B20_vec_vsel,
+};
+static const struct builtin *const O_vec_sl[6] = {
+ &B1_vec_vslh,
+ &B1_vec_vslw,
+ &B1_vec_vslb,
+ &B2_vec_vslh,
+ &B2_vec_vslw,
+ &B2_vec_vslb,
+};
+static const struct builtin *const O_vec_sld[11] = {
+ &B1_vec_vsldoi,
+ &B2_vec_vsldoi,
+ &B3_vec_vsldoi,
+ &B4_vec_vsldoi,
+ &B5_vec_vsldoi,
+ &B6_vec_vsldoi,
+ &B7_vec_vsldoi,
+ &B8_vec_vsldoi,
+ &B9_vec_vsldoi,
+ &B10_vec_vsldoi,
+ &B11_vec_vsldoi,
+};
+static const struct builtin *const O_vec_sll[30] = {
+ &B1_vec_vsl,
+ &B2_vec_vsl,
+ &B3_vec_vsl,
+ &B4_vec_vsl,
+ &B5_vec_vsl,
+ &B6_vec_vsl,
+ &B7_vec_vsl,
+ &B8_vec_vsl,
+ &B9_vec_vsl,
+ &B10_vec_vsl,
+ &B11_vec_vsl,
+ &B12_vec_vsl,
+ &B13_vec_vsl,
+ &B14_vec_vsl,
+ &B15_vec_vsl,
+ &B16_vec_vsl,
+ &B17_vec_vsl,
+ &B18_vec_vsl,
+ &B19_vec_vsl,
+ &B20_vec_vsl,
+ &B21_vec_vsl,
+ &B22_vec_vsl,
+ &B23_vec_vsl,
+ &B24_vec_vsl,
+ &B25_vec_vsl,
+ &B26_vec_vsl,
+ &B27_vec_vsl,
+ &B28_vec_vsl,
+ &B29_vec_vsl,
+ &B30_vec_vsl,
+};
+static const struct builtin *const O_vec_slo[16] = {
+ &B1_vec_vslo,
+ &B2_vec_vslo,
+ &B3_vec_vslo,
+ &B4_vec_vslo,
+ &B5_vec_vslo,
+ &B6_vec_vslo,
+ &B7_vec_vslo,
+ &B8_vec_vslo,
+ &B9_vec_vslo,
+ &B10_vec_vslo,
+ &B11_vec_vslo,
+ &B12_vec_vslo,
+ &B13_vec_vslo,
+ &B14_vec_vslo,
+ &B15_vec_vslo,
+ &B16_vec_vslo,
+};
+static const struct builtin *const O_vec_splat[11] = {
+ &B1_vec_vsplth,
+ &B1_vec_vspltw,
+ &B1_vec_vspltb,
+ &B2_vec_vspltw,
+ &B2_vec_vsplth,
+ &B3_vec_vsplth,
+ &B3_vec_vspltw,
+ &B2_vec_vspltb,
+ &B4_vec_vsplth,
+ &B4_vec_vspltw,
+ &B3_vec_vspltb,
+};
+static const struct builtin *const O_vec_splat_s16[1] = {
+ &B_vec_vspltish,
+};
+static const struct builtin *const O_vec_splat_s32[1] = {
+ &B_vec_vspltisw,
+};
+static const struct builtin *const O_vec_splat_s8[1] = {
+ &B_vec_vspltisb,
+};
+static const struct builtin *const O_vec_splat_u16[1] = {
+ &B_vec_splat_u16,
+};
+static const struct builtin *const O_vec_splat_u32[1] = {
+ &B_vec_splat_u32,
+};
+static const struct builtin *const O_vec_splat_u8[1] = {
+ &B_vec_splat_u8,
+};
+static const struct builtin *const O_vec_sr[6] = {
+ &B1_vec_vsrh,
+ &B1_vec_vsrw,
+ &B1_vec_vsrb,
+ &B2_vec_vsrh,
+ &B2_vec_vsrw,
+ &B2_vec_vsrb,
+};
+static const struct builtin *const O_vec_sra[6] = {
+ &B1_vec_vsrah,
+ &B1_vec_vsraw,
+ &B1_vec_vsrab,
+ &B2_vec_vsrah,
+ &B2_vec_vsraw,
+ &B2_vec_vsrab,
+};
+static const struct builtin *const O_vec_srl[30] = {
+ &B1_vec_vsr,
+ &B2_vec_vsr,
+ &B3_vec_vsr,
+ &B4_vec_vsr,
+ &B5_vec_vsr,
+ &B6_vec_vsr,
+ &B7_vec_vsr,
+ &B8_vec_vsr,
+ &B9_vec_vsr,
+ &B10_vec_vsr,
+ &B11_vec_vsr,
+ &B12_vec_vsr,
+ &B13_vec_vsr,
+ &B14_vec_vsr,
+ &B15_vec_vsr,
+ &B16_vec_vsr,
+ &B17_vec_vsr,
+ &B18_vec_vsr,
+ &B19_vec_vsr,
+ &B20_vec_vsr,
+ &B21_vec_vsr,
+ &B22_vec_vsr,
+ &B23_vec_vsr,
+ &B24_vec_vsr,
+ &B25_vec_vsr,
+ &B26_vec_vsr,
+ &B27_vec_vsr,
+ &B28_vec_vsr,
+ &B29_vec_vsr,
+ &B30_vec_vsr,
+};
+static const struct builtin *const O_vec_sro[16] = {
+ &B1_vec_vsro,
+ &B2_vec_vsro,
+ &B3_vec_vsro,
+ &B4_vec_vsro,
+ &B5_vec_vsro,
+ &B6_vec_vsro,
+ &B7_vec_vsro,
+ &B8_vec_vsro,
+ &B9_vec_vsro,
+ &B10_vec_vsro,
+ &B11_vec_vsro,
+ &B12_vec_vsro,
+ &B13_vec_vsro,
+ &B14_vec_vsro,
+ &B15_vec_vsro,
+ &B16_vec_vsro,
+};
+static const struct builtin *const O_vec_st[30] = {
+ &B1_vec_stvx,
+ &B2_vec_stvx,
+ &B3_vec_stvx,
+ &B4_vec_stvx,
+ &B5_vec_stvx,
+ &B6_vec_stvx,
+ &B7_vec_stvx,
+ &B8_vec_stvx,
+ &B9_vec_stvx,
+ &B10_vec_stvx,
+ &B11_vec_stvx,
+ &B12_vec_stvx,
+ &B13_vec_stvx,
+ &B14_vec_stvx,
+ &B15_vec_stvx,
+ &B16_vec_stvx,
+ &B17_vec_stvx,
+ &B18_vec_stvx,
+ &B19_vec_stvx,
+ &B20_vec_stvx,
+ &B21_vec_stvx,
+ &B22_vec_stvx,
+ &B23_vec_stvx,
+ &B24_vec_stvx,
+ &B25_vec_stvx,
+ &B26_vec_stvx,
+ &B27_vec_stvx,
+ &B28_vec_stvx,
+ &B29_vec_stvx,
+ &B30_vec_stvx,
+};
+static const struct builtin *const O_vec_ste[19] = {
+ &B1_vec_stvebx,
+ &B2_vec_stvebx,
+ &B1_vec_stvewx,
+ &B2_vec_stvewx,
+ &B3_vec_stvewx,
+ &B4_vec_stvewx,
+ &B3_vec_stvebx,
+ &B4_vec_stvebx,
+ &B5_vec_stvewx,
+ &B1_vec_stvehx,
+ &B2_vec_stvehx,
+ &B3_vec_stvehx,
+ &B6_vec_stvewx,
+ &B7_vec_stvewx,
+ &B5_vec_stvebx,
+ &B4_vec_stvehx,
+ &B8_vec_stvewx,
+ &B9_vec_stvewx,
+ &B6_vec_stvebx,
+};
+static const struct builtin *const O_vec_stl[30] = {
+ &B1_vec_stvxl,
+ &B2_vec_stvxl,
+ &B3_vec_stvxl,
+ &B4_vec_stvxl,
+ &B5_vec_stvxl,
+ &B6_vec_stvxl,
+ &B7_vec_stvxl,
+ &B8_vec_stvxl,
+ &B9_vec_stvxl,
+ &B10_vec_stvxl,
+ &B11_vec_stvxl,
+ &B12_vec_stvxl,
+ &B13_vec_stvxl,
+ &B14_vec_stvxl,
+ &B15_vec_stvxl,
+ &B16_vec_stvxl,
+ &B17_vec_stvxl,
+ &B18_vec_stvxl,
+ &B19_vec_stvxl,
+ &B20_vec_stvxl,
+ &B21_vec_stvxl,
+ &B22_vec_stvxl,
+ &B23_vec_stvxl,
+ &B24_vec_stvxl,
+ &B25_vec_stvxl,
+ &B26_vec_stvxl,
+ &B27_vec_stvxl,
+ &B28_vec_stvxl,
+ &B29_vec_stvxl,
+ &B30_vec_stvxl,
+};
+static const struct builtin *const O_vec_stvebx[6] = {
+ &B1_vec_stvebx,
+ &B2_vec_stvebx,
+ &B3_vec_stvebx,
+ &B4_vec_stvebx,
+ &B5_vec_stvebx,
+ &B6_vec_stvebx,
+};
+static const struct builtin *const O_vec_stvehx[4] = {
+ &B1_vec_stvehx,
+ &B2_vec_stvehx,
+ &B3_vec_stvehx,
+ &B4_vec_stvehx,
+};
+static const struct builtin *const O_vec_stvewx[9] = {
+ &B1_vec_stvewx,
+ &B2_vec_stvewx,
+ &B3_vec_stvewx,
+ &B4_vec_stvewx,
+ &B5_vec_stvewx,
+ &B6_vec_stvewx,
+ &B7_vec_stvewx,
+ &B8_vec_stvewx,
+ &B9_vec_stvewx,
+};
+static const struct builtin *const O_vec_stvx[30] = {
+ &B1_vec_stvx,
+ &B2_vec_stvx,
+ &B3_vec_stvx,
+ &B4_vec_stvx,
+ &B5_vec_stvx,
+ &B6_vec_stvx,
+ &B7_vec_stvx,
+ &B8_vec_stvx,
+ &B9_vec_stvx,
+ &B10_vec_stvx,
+ &B11_vec_stvx,
+ &B12_vec_stvx,
+ &B13_vec_stvx,
+ &B14_vec_stvx,
+ &B15_vec_stvx,
+ &B16_vec_stvx,
+ &B17_vec_stvx,
+ &B18_vec_stvx,
+ &B19_vec_stvx,
+ &B20_vec_stvx,
+ &B21_vec_stvx,
+ &B22_vec_stvx,
+ &B23_vec_stvx,
+ &B24_vec_stvx,
+ &B25_vec_stvx,
+ &B26_vec_stvx,
+ &B27_vec_stvx,
+ &B28_vec_stvx,
+ &B29_vec_stvx,
+ &B30_vec_stvx,
+};
+static const struct builtin *const O_vec_stvxl[30] = {
+ &B1_vec_stvxl,
+ &B2_vec_stvxl,
+ &B3_vec_stvxl,
+ &B4_vec_stvxl,
+ &B5_vec_stvxl,
+ &B6_vec_stvxl,
+ &B7_vec_stvxl,
+ &B8_vec_stvxl,
+ &B9_vec_stvxl,
+ &B10_vec_stvxl,
+ &B11_vec_stvxl,
+ &B12_vec_stvxl,
+ &B13_vec_stvxl,
+ &B14_vec_stvxl,
+ &B15_vec_stvxl,
+ &B16_vec_stvxl,
+ &B17_vec_stvxl,
+ &B18_vec_stvxl,
+ &B19_vec_stvxl,
+ &B20_vec_stvxl,
+ &B21_vec_stvxl,
+ &B22_vec_stvxl,
+ &B23_vec_stvxl,
+ &B24_vec_stvxl,
+ &B25_vec_stvxl,
+ &B26_vec_stvxl,
+ &B27_vec_stvxl,
+ &B28_vec_stvxl,
+ &B29_vec_stvxl,
+ &B30_vec_stvxl,
+};
+static const struct builtin *const O_vec_sub[19] = {
+ &B1_vec_vsubuhm,
+ &B2_vec_vsubuhm,
+ &B1_vec_vsubuwm,
+ &B2_vec_vsubuwm,
+ &B1_vec_vsububm,
+ &B2_vec_vsububm,
+ &B_vec_vsubfp,
+ &B3_vec_vsubuhm,
+ &B4_vec_vsubuhm,
+ &B3_vec_vsubuwm,
+ &B4_vec_vsubuwm,
+ &B3_vec_vsububm,
+ &B4_vec_vsububm,
+ &B5_vec_vsubuhm,
+ &B6_vec_vsubuhm,
+ &B5_vec_vsubuwm,
+ &B6_vec_vsubuwm,
+ &B5_vec_vsububm,
+ &B6_vec_vsububm,
+};
+static const struct builtin *const O_vec_subc[1] = {
+ &B_vec_vsubcuw,
+};
+static const struct builtin *const O_vec_subs[18] = {
+ &B1_vec_vsubshs,
+ &B1_vec_vsubuhs,
+ &B1_vec_vsubsws,
+ &B1_vec_vsubuws,
+ &B1_vec_vsubsbs,
+ &B1_vec_vsububs,
+ &B2_vec_vsubshs,
+ &B3_vec_vsubshs,
+ &B2_vec_vsubsws,
+ &B3_vec_vsubsws,
+ &B2_vec_vsubsbs,
+ &B3_vec_vsubsbs,
+ &B2_vec_vsubuhs,
+ &B3_vec_vsubuhs,
+ &B2_vec_vsubuws,
+ &B3_vec_vsubuws,
+ &B2_vec_vsububs,
+ &B3_vec_vsububs,
+};
+static const struct builtin *const O_vec_sum2s[1] = {
+ &B_vec_vsum2sws,
+};
+static const struct builtin *const O_vec_sum4s[3] = {
+ &B_vec_vsum4shs,
+ &B_vec_vsum4sbs,
+ &B_vec_vsum4ubs,
+};
+static const struct builtin *const O_vec_sums[1] = {
+ &B_vec_vsumsws,
+};
+static const struct builtin *const O_vec_trunc[1] = {
+ &B_vec_vrfiz,
+};
+static const struct builtin *const O_vec_unpack2sh[2] = {
+ &B1_vec_unpack2sh,
+ &B2_vec_unpack2sh,
+};
+static const struct builtin *const O_vec_unpack2sl[2] = {
+ &B1_vec_unpack2sl,
+ &B2_vec_unpack2sl,
+};
+static const struct builtin *const O_vec_unpack2uh[2] = {
+ &B1_vec_unpack2uh,
+ &B2_vec_unpack2uh,
+};
+static const struct builtin *const O_vec_unpack2ul[2] = {
+ &B1_vec_unpack2ul,
+ &B2_vec_unpack2ul,
+};
+static const struct builtin *const O_vec_unpackh[5] = {
+ &B1_vec_vupkhsh,
+ &B1_vec_vupkhsb,
+ &B_vec_vupkhpx,
+ &B2_vec_vupkhsh,
+ &B2_vec_vupkhsb,
+};
+static const struct builtin *const O_vec_unpackl[5] = {
+ &B1_vec_vupklsh,
+ &B1_vec_vupklsb,
+ &B_vec_vupklpx,
+ &B2_vec_vupklsh,
+ &B2_vec_vupklsb,
+};
+static const struct builtin *const O_vec_vaddcuw[1] = {
+ &B_vec_vaddcuw,
+};
+static const struct builtin *const O_vec_vaddfp[1] = {
+ &B_vec_vaddfp,
+};
+static const struct builtin *const O_vec_vaddsbs[3] = {
+ &B1_vec_vaddsbs,
+ &B2_vec_vaddsbs,
+ &B3_vec_vaddsbs,
+};
+static const struct builtin *const O_vec_vaddshs[3] = {
+ &B1_vec_vaddshs,
+ &B2_vec_vaddshs,
+ &B3_vec_vaddshs,
+};
+static const struct builtin *const O_vec_vaddsws[3] = {
+ &B1_vec_vaddsws,
+ &B2_vec_vaddsws,
+ &B3_vec_vaddsws,
+};
+static const struct builtin *const O_vec_vaddubm[6] = {
+ &B1_vec_vaddubm,
+ &B2_vec_vaddubm,
+ &B3_vec_vaddubm,
+ &B4_vec_vaddubm,
+ &B5_vec_vaddubm,
+ &B6_vec_vaddubm,
+};
+static const struct builtin *const O_vec_vaddubs[3] = {
+ &B1_vec_vaddubs,
+ &B2_vec_vaddubs,
+ &B3_vec_vaddubs,
+};
+static const struct builtin *const O_vec_vadduhm[6] = {
+ &B1_vec_vadduhm,
+ &B2_vec_vadduhm,
+ &B3_vec_vadduhm,
+ &B4_vec_vadduhm,
+ &B5_vec_vadduhm,
+ &B6_vec_vadduhm,
+};
+static const struct builtin *const O_vec_vadduhs[3] = {
+ &B1_vec_vadduhs,
+ &B2_vec_vadduhs,
+ &B3_vec_vadduhs,
+};
+static const struct builtin *const O_vec_vadduwm[6] = {
+ &B1_vec_vadduwm,
+ &B2_vec_vadduwm,
+ &B3_vec_vadduwm,
+ &B4_vec_vadduwm,
+ &B5_vec_vadduwm,
+ &B6_vec_vadduwm,
+};
+static const struct builtin *const O_vec_vadduws[3] = {
+ &B1_vec_vadduws,
+ &B2_vec_vadduws,
+ &B3_vec_vadduws,
+};
+static const struct builtin *const O_vec_vand[24] = {
+ &B1_vec_vand,
+ &B2_vec_vand,
+ &B3_vec_vand,
+ &B4_vec_vand,
+ &B5_vec_vand,
+ &B6_vec_vand,
+ &B7_vec_vand,
+ &B8_vec_vand,
+ &B9_vec_vand,
+ &B10_vec_vand,
+ &B11_vec_vand,
+ &B12_vec_vand,
+ &B13_vec_vand,
+ &B14_vec_vand,
+ &B15_vec_vand,
+ &B16_vec_vand,
+ &B17_vec_vand,
+ &B18_vec_vand,
+ &B19_vec_vand,
+ &B20_vec_vand,
+ &B21_vec_vand,
+ &B22_vec_vand,
+ &B23_vec_vand,
+ &B24_vec_vand,
+};
+static const struct builtin *const O_vec_vandc[24] = {
+ &B1_vec_vandc,
+ &B2_vec_vandc,
+ &B3_vec_vandc,
+ &B4_vec_vandc,
+ &B5_vec_vandc,
+ &B6_vec_vandc,
+ &B7_vec_vandc,
+ &B8_vec_vandc,
+ &B9_vec_vandc,
+ &B10_vec_vandc,
+ &B11_vec_vandc,
+ &B12_vec_vandc,
+ &B13_vec_vandc,
+ &B14_vec_vandc,
+ &B15_vec_vandc,
+ &B16_vec_vandc,
+ &B17_vec_vandc,
+ &B18_vec_vandc,
+ &B19_vec_vandc,
+ &B20_vec_vandc,
+ &B21_vec_vandc,
+ &B22_vec_vandc,
+ &B23_vec_vandc,
+ &B24_vec_vandc,
+};
+static const struct builtin *const O_vec_vavgsb[1] = {
+ &B_vec_vavgsb,
+};
+static const struct builtin *const O_vec_vavgsh[1] = {
+ &B_vec_vavgsh,
+};
+static const struct builtin *const O_vec_vavgsw[1] = {
+ &B_vec_vavgsw,
+};
+static const struct builtin *const O_vec_vavgub[1] = {
+ &B_vec_vavgub,
+};
+static const struct builtin *const O_vec_vavguh[1] = {
+ &B_vec_vavguh,
+};
+static const struct builtin *const O_vec_vavguw[1] = {
+ &B_vec_vavguw,
+};
+static const struct builtin *const O_vec_vcfsx[1] = {
+ &B_vec_vcfsx,
+};
+static const struct builtin *const O_vec_vcfux[1] = {
+ &B_vec_vcfux,
+};
+static const struct builtin *const O_vec_vcmpbfp[1] = {
+ &B_vec_vcmpbfp,
+};
+static const struct builtin *const O_vec_vcmpeqfp[1] = {
+ &B_vec_vcmpeqfp,
+};
+static const struct builtin *const O_vec_vcmpequb[2] = {
+ &B1_vec_vcmpequb,
+ &B2_vec_vcmpequb,
+};
+static const struct builtin *const O_vec_vcmpequh[2] = {
+ &B1_vec_vcmpequh,
+ &B2_vec_vcmpequh,
+};
+static const struct builtin *const O_vec_vcmpequw[2] = {
+ &B1_vec_vcmpequw,
+ &B2_vec_vcmpequw,
+};
+static const struct builtin *const O_vec_vcmpgefp[1] = {
+ &B_vec_vcmpgefp,
+};
+static const struct builtin *const O_vec_vcmpgtfp[1] = {
+ &B_vec_vcmpgtfp,
+};
+static const struct builtin *const O_vec_vcmpgtsb[1] = {
+ &B_vec_vcmpgtsb,
+};
+static const struct builtin *const O_vec_vcmpgtsh[1] = {
+ &B_vec_vcmpgtsh,
+};
+static const struct builtin *const O_vec_vcmpgtsw[1] = {
+ &B_vec_vcmpgtsw,
+};
+static const struct builtin *const O_vec_vcmpgtub[1] = {
+ &B_vec_vcmpgtub,
+};
+static const struct builtin *const O_vec_vcmpgtuh[1] = {
+ &B_vec_vcmpgtuh,
+};
+static const struct builtin *const O_vec_vcmpgtuw[1] = {
+ &B_vec_vcmpgtuw,
+};
+static const struct builtin *const O_vec_vctsxs[1] = {
+ &B_vec_vctsxs,
+};
+static const struct builtin *const O_vec_vctuxs[1] = {
+ &B_vec_vctuxs,
+};
+static const struct builtin *const O_vec_vexptefp[1] = {
+ &B_vec_vexptefp,
+};
+static const struct builtin *const O_vec_vlogefp[1] = {
+ &B_vec_vlogefp,
+};
+static const struct builtin *const O_vec_vmaddfp[1] = {
+ &B_vec_vmaddfp,
+};
+static const struct builtin *const O_vec_vmaxfp[1] = {
+ &B_vec_vmaxfp,
+};
+static const struct builtin *const O_vec_vmaxsb[3] = {
+ &B1_vec_vmaxsb,
+ &B2_vec_vmaxsb,
+ &B3_vec_vmaxsb,
+};
+static const struct builtin *const O_vec_vmaxsh[3] = {
+ &B1_vec_vmaxsh,
+ &B2_vec_vmaxsh,
+ &B3_vec_vmaxsh,
+};
+static const struct builtin *const O_vec_vmaxsw[3] = {
+ &B1_vec_vmaxsw,
+ &B2_vec_vmaxsw,
+ &B3_vec_vmaxsw,
+};
+static const struct builtin *const O_vec_vmaxub[3] = {
+ &B1_vec_vmaxub,
+ &B2_vec_vmaxub,
+ &B3_vec_vmaxub,
+};
+static const struct builtin *const O_vec_vmaxuh[3] = {
+ &B1_vec_vmaxuh,
+ &B2_vec_vmaxuh,
+ &B3_vec_vmaxuh,
+};
+static const struct builtin *const O_vec_vmaxuw[3] = {
+ &B1_vec_vmaxuw,
+ &B2_vec_vmaxuw,
+ &B3_vec_vmaxuw,
+};
+static const struct builtin *const O_vec_vmhaddshs[1] = {
+ &B_vec_vmhaddshs,
+};
+static const struct builtin *const O_vec_vmhraddshs[1] = {
+ &B_vec_vmhraddshs,
+};
+static const struct builtin *const O_vec_vminfp[1] = {
+ &B_vec_vminfp,
+};
+static const struct builtin *const O_vec_vminsb[3] = {
+ &B1_vec_vminsb,
+ &B2_vec_vminsb,
+ &B3_vec_vminsb,
+};
+static const struct builtin *const O_vec_vminsh[3] = {
+ &B1_vec_vminsh,
+ &B2_vec_vminsh,
+ &B3_vec_vminsh,
+};
+static const struct builtin *const O_vec_vminsw[3] = {
+ &B1_vec_vminsw,
+ &B2_vec_vminsw,
+ &B3_vec_vminsw,
+};
+static const struct builtin *const O_vec_vminub[3] = {
+ &B1_vec_vminub,
+ &B2_vec_vminub,
+ &B3_vec_vminub,
+};
+static const struct builtin *const O_vec_vminuh[3] = {
+ &B1_vec_vminuh,
+ &B2_vec_vminuh,
+ &B3_vec_vminuh,
+};
+static const struct builtin *const O_vec_vminuw[3] = {
+ &B1_vec_vminuw,
+ &B2_vec_vminuw,
+ &B3_vec_vminuw,
+};
+static const struct builtin *const O_vec_vmladduhm[4] = {
+ &B1_vec_vmladduhm,
+ &B2_vec_vmladduhm,
+ &B3_vec_vmladduhm,
+ &B4_vec_vmladduhm,
+};
+static const struct builtin *const O_vec_vmrghb[3] = {
+ &B1_vec_vmrghb,
+ &B2_vec_vmrghb,
+ &B3_vec_vmrghb,
+};
+static const struct builtin *const O_vec_vmrghh[4] = {
+ &B1_vec_vmrghh,
+ &B2_vec_vmrghh,
+ &B3_vec_vmrghh,
+ &B4_vec_vmrghh,
+};
+static const struct builtin *const O_vec_vmrghw[4] = {
+ &B1_vec_vmrghw,
+ &B2_vec_vmrghw,
+ &B3_vec_vmrghw,
+ &B4_vec_vmrghw,
+};
+static const struct builtin *const O_vec_vmrglb[3] = {
+ &B1_vec_vmrglb,
+ &B2_vec_vmrglb,
+ &B3_vec_vmrglb,
+};
+static const struct builtin *const O_vec_vmrglh[4] = {
+ &B1_vec_vmrglh,
+ &B2_vec_vmrglh,
+ &B3_vec_vmrglh,
+ &B4_vec_vmrglh,
+};
+static const struct builtin *const O_vec_vmrglw[4] = {
+ &B1_vec_vmrglw,
+ &B2_vec_vmrglw,
+ &B3_vec_vmrglw,
+ &B4_vec_vmrglw,
+};
+static const struct builtin *const O_vec_vmsummbm[1] = {
+ &B_vec_vmsummbm,
+};
+static const struct builtin *const O_vec_vmsumshm[1] = {
+ &B_vec_vmsumshm,
+};
+static const struct builtin *const O_vec_vmsumshs[1] = {
+ &B_vec_vmsumshs,
+};
+static const struct builtin *const O_vec_vmsumubm[1] = {
+ &B_vec_vmsumubm,
+};
+static const struct builtin *const O_vec_vmsumuhm[1] = {
+ &B_vec_vmsumuhm,
+};
+static const struct builtin *const O_vec_vmsumuhs[1] = {
+ &B_vec_vmsumuhs,
+};
+static const struct builtin *const O_vec_vmulesb[1] = {
+ &B_vec_vmulesb,
+};
+static const struct builtin *const O_vec_vmulesh[1] = {
+ &B_vec_vmulesh,
+};
+static const struct builtin *const O_vec_vmuleub[1] = {
+ &B_vec_vmuleub,
+};
+static const struct builtin *const O_vec_vmuleuh[1] = {
+ &B_vec_vmuleuh,
+};
+static const struct builtin *const O_vec_vmulosb[1] = {
+ &B_vec_vmulosb,
+};
+static const struct builtin *const O_vec_vmulosh[1] = {
+ &B_vec_vmulosh,
+};
+static const struct builtin *const O_vec_vmuloub[1] = {
+ &B_vec_vmuloub,
+};
+static const struct builtin *const O_vec_vmulouh[1] = {
+ &B_vec_vmulouh,
+};
+static const struct builtin *const O_vec_vnmsubfp[1] = {
+ &B_vec_vnmsubfp,
+};
+static const struct builtin *const O_vec_vnor[10] = {
+ &B1_vec_vnor,
+ &B2_vec_vnor,
+ &B3_vec_vnor,
+ &B4_vec_vnor,
+ &B5_vec_vnor,
+ &B6_vec_vnor,
+ &B7_vec_vnor,
+ &B8_vec_vnor,
+ &B9_vec_vnor,
+ &B10_vec_vnor,
+};
+static const struct builtin *const O_vec_vor[24] = {
+ &B1_vec_vor,
+ &B2_vec_vor,
+ &B3_vec_vor,
+ &B4_vec_vor,
+ &B5_vec_vor,
+ &B6_vec_vor,
+ &B7_vec_vor,
+ &B8_vec_vor,
+ &B9_vec_vor,
+ &B10_vec_vor,
+ &B11_vec_vor,
+ &B12_vec_vor,
+ &B13_vec_vor,
+ &B14_vec_vor,
+ &B15_vec_vor,
+ &B16_vec_vor,
+ &B17_vec_vor,
+ &B18_vec_vor,
+ &B19_vec_vor,
+ &B20_vec_vor,
+ &B21_vec_vor,
+ &B22_vec_vor,
+ &B23_vec_vor,
+ &B24_vec_vor,
+};
+static const struct builtin *const O_vec_vperm[11] = {
+ &B1_vec_vperm,
+ &B2_vec_vperm,
+ &B3_vec_vperm,
+ &B4_vec_vperm,
+ &B5_vec_vperm,
+ &B6_vec_vperm,
+ &B7_vec_vperm,
+ &B8_vec_vperm,
+ &B9_vec_vperm,
+ &B10_vec_vperm,
+ &B11_vec_vperm,
+};
+static const struct builtin *const O_vec_vpkpx[1] = {
+ &B_vec_vpkpx,
+};
+static const struct builtin *const O_vec_vpkshss[1] = {
+ &B_vec_vpkshss,
+};
+static const struct builtin *const O_vec_vpkshus[1] = {
+ &B_vec_vpkshus,
+};
+static const struct builtin *const O_vec_vpkswss[1] = {
+ &B_vec_vpkswss,
+};
+static const struct builtin *const O_vec_vpkswus[1] = {
+ &B_vec_vpkswus,
+};
+static const struct builtin *const O_vec_vpkuhum[3] = {
+ &B1_vec_vpkuhum,
+ &B2_vec_vpkuhum,
+ &B3_vec_vpkuhum,
+};
+static const struct builtin *const O_vec_vpkuhus[1] = {
+ &B_vec_vpkuhus,
+};
+static const struct builtin *const O_vec_vpkuwum[3] = {
+ &B1_vec_vpkuwum,
+ &B2_vec_vpkuwum,
+ &B3_vec_vpkuwum,
+};
+static const struct builtin *const O_vec_vpkuwus[1] = {
+ &B_vec_vpkuwus,
+};
+static const struct builtin *const O_vec_vrefp[1] = {
+ &B_vec_vrefp,
+};
+static const struct builtin *const O_vec_vrfim[1] = {
+ &B_vec_vrfim,
+};
+static const struct builtin *const O_vec_vrfin[1] = {
+ &B_vec_vrfin,
+};
+static const struct builtin *const O_vec_vrfip[1] = {
+ &B_vec_vrfip,
+};
+static const struct builtin *const O_vec_vrfiz[1] = {
+ &B_vec_vrfiz,
+};
+static const struct builtin *const O_vec_vrlb[2] = {
+ &B1_vec_vrlb,
+ &B2_vec_vrlb,
+};
+static const struct builtin *const O_vec_vrlh[2] = {
+ &B1_vec_vrlh,
+ &B2_vec_vrlh,
+};
+static const struct builtin *const O_vec_vrlw[2] = {
+ &B1_vec_vrlw,
+ &B2_vec_vrlw,
+};
+static const struct builtin *const O_vec_vrsqrtefp[1] = {
+ &B_vec_vrsqrtefp,
+};
+static const struct builtin *const O_vec_vsel[20] = {
+ &B1_vec_vsel,
+ &B2_vec_vsel,
+ &B3_vec_vsel,
+ &B4_vec_vsel,
+ &B5_vec_vsel,
+ &B6_vec_vsel,
+ &B7_vec_vsel,
+ &B8_vec_vsel,
+ &B9_vec_vsel,
+ &B10_vec_vsel,
+ &B11_vec_vsel,
+ &B12_vec_vsel,
+ &B13_vec_vsel,
+ &B14_vec_vsel,
+ &B15_vec_vsel,
+ &B16_vec_vsel,
+ &B17_vec_vsel,
+ &B18_vec_vsel,
+ &B19_vec_vsel,
+ &B20_vec_vsel,
+};
+static const struct builtin *const O_vec_vsl[30] = {
+ &B1_vec_vsl,
+ &B2_vec_vsl,
+ &B3_vec_vsl,
+ &B4_vec_vsl,
+ &B5_vec_vsl,
+ &B6_vec_vsl,
+ &B7_vec_vsl,
+ &B8_vec_vsl,
+ &B9_vec_vsl,
+ &B10_vec_vsl,
+ &B11_vec_vsl,
+ &B12_vec_vsl,
+ &B13_vec_vsl,
+ &B14_vec_vsl,
+ &B15_vec_vsl,
+ &B16_vec_vsl,
+ &B17_vec_vsl,
+ &B18_vec_vsl,
+ &B19_vec_vsl,
+ &B20_vec_vsl,
+ &B21_vec_vsl,
+ &B22_vec_vsl,
+ &B23_vec_vsl,
+ &B24_vec_vsl,
+ &B25_vec_vsl,
+ &B26_vec_vsl,
+ &B27_vec_vsl,
+ &B28_vec_vsl,
+ &B29_vec_vsl,
+ &B30_vec_vsl,
+};
+static const struct builtin *const O_vec_vslb[2] = {
+ &B1_vec_vslb,
+ &B2_vec_vslb,
+};
+static const struct builtin *const O_vec_vsldoi[11] = {
+ &B1_vec_vsldoi,
+ &B2_vec_vsldoi,
+ &B3_vec_vsldoi,
+ &B4_vec_vsldoi,
+ &B5_vec_vsldoi,
+ &B6_vec_vsldoi,
+ &B7_vec_vsldoi,
+ &B8_vec_vsldoi,
+ &B9_vec_vsldoi,
+ &B10_vec_vsldoi,
+ &B11_vec_vsldoi,
+};
+static const struct builtin *const O_vec_vslh[2] = {
+ &B1_vec_vslh,
+ &B2_vec_vslh,
+};
+static const struct builtin *const O_vec_vslo[16] = {
+ &B1_vec_vslo,
+ &B2_vec_vslo,
+ &B3_vec_vslo,
+ &B4_vec_vslo,
+ &B5_vec_vslo,
+ &B6_vec_vslo,
+ &B7_vec_vslo,
+ &B8_vec_vslo,
+ &B9_vec_vslo,
+ &B10_vec_vslo,
+ &B11_vec_vslo,
+ &B12_vec_vslo,
+ &B13_vec_vslo,
+ &B14_vec_vslo,
+ &B15_vec_vslo,
+ &B16_vec_vslo,
+};
+static const struct builtin *const O_vec_vslw[2] = {
+ &B1_vec_vslw,
+ &B2_vec_vslw,
+};
+static const struct builtin *const O_vec_vspltb[3] = {
+ &B1_vec_vspltb,
+ &B2_vec_vspltb,
+ &B3_vec_vspltb,
+};
+static const struct builtin *const O_vec_vsplth[4] = {
+ &B1_vec_vsplth,
+ &B2_vec_vsplth,
+ &B3_vec_vsplth,
+ &B4_vec_vsplth,
+};
+static const struct builtin *const O_vec_vspltisb[1] = {
+ &B_vec_vspltisb,
+};
+static const struct builtin *const O_vec_vspltish[1] = {
+ &B_vec_vspltish,
+};
+static const struct builtin *const O_vec_vspltisw[1] = {
+ &B_vec_vspltisw,
+};
+static const struct builtin *const O_vec_vspltw[4] = {
+ &B1_vec_vspltw,
+ &B2_vec_vspltw,
+ &B3_vec_vspltw,
+ &B4_vec_vspltw,
+};
+static const struct builtin *const O_vec_vsr[30] = {
+ &B1_vec_vsr,
+ &B2_vec_vsr,
+ &B3_vec_vsr,
+ &B4_vec_vsr,
+ &B5_vec_vsr,
+ &B6_vec_vsr,
+ &B7_vec_vsr,
+ &B8_vec_vsr,
+ &B9_vec_vsr,
+ &B10_vec_vsr,
+ &B11_vec_vsr,
+ &B12_vec_vsr,
+ &B13_vec_vsr,
+ &B14_vec_vsr,
+ &B15_vec_vsr,
+ &B16_vec_vsr,
+ &B17_vec_vsr,
+ &B18_vec_vsr,
+ &B19_vec_vsr,
+ &B20_vec_vsr,
+ &B21_vec_vsr,
+ &B22_vec_vsr,
+ &B23_vec_vsr,
+ &B24_vec_vsr,
+ &B25_vec_vsr,
+ &B26_vec_vsr,
+ &B27_vec_vsr,
+ &B28_vec_vsr,
+ &B29_vec_vsr,
+ &B30_vec_vsr,
+};
+static const struct builtin *const O_vec_vsrab[2] = {
+ &B1_vec_vsrab,
+ &B2_vec_vsrab,
+};
+static const struct builtin *const O_vec_vsrah[2] = {
+ &B1_vec_vsrah,
+ &B2_vec_vsrah,
+};
+static const struct builtin *const O_vec_vsraw[2] = {
+ &B1_vec_vsraw,
+ &B2_vec_vsraw,
+};
+static const struct builtin *const O_vec_vsrb[2] = {
+ &B1_vec_vsrb,
+ &B2_vec_vsrb,
+};
+static const struct builtin *const O_vec_vsrh[2] = {
+ &B1_vec_vsrh,
+ &B2_vec_vsrh,
+};
+static const struct builtin *const O_vec_vsro[16] = {
+ &B1_vec_vsro,
+ &B2_vec_vsro,
+ &B3_vec_vsro,
+ &B4_vec_vsro,
+ &B5_vec_vsro,
+ &B6_vec_vsro,
+ &B7_vec_vsro,
+ &B8_vec_vsro,
+ &B9_vec_vsro,
+ &B10_vec_vsro,
+ &B11_vec_vsro,
+ &B12_vec_vsro,
+ &B13_vec_vsro,
+ &B14_vec_vsro,
+ &B15_vec_vsro,
+ &B16_vec_vsro,
+};
+static const struct builtin *const O_vec_vsrw[2] = {
+ &B1_vec_vsrw,
+ &B2_vec_vsrw,
+};
+static const struct builtin *const O_vec_vsubcuw[1] = {
+ &B_vec_vsubcuw,
+};
+static const struct builtin *const O_vec_vsubfp[1] = {
+ &B_vec_vsubfp,
+};
+static const struct builtin *const O_vec_vsubsbs[3] = {
+ &B1_vec_vsubsbs,
+ &B2_vec_vsubsbs,
+ &B3_vec_vsubsbs,
+};
+static const struct builtin *const O_vec_vsubshs[3] = {
+ &B1_vec_vsubshs,
+ &B2_vec_vsubshs,
+ &B3_vec_vsubshs,
+};
+static const struct builtin *const O_vec_vsubsws[3] = {
+ &B1_vec_vsubsws,
+ &B2_vec_vsubsws,
+ &B3_vec_vsubsws,
+};
+static const struct builtin *const O_vec_vsububm[6] = {
+ &B1_vec_vsububm,
+ &B2_vec_vsububm,
+ &B3_vec_vsububm,
+ &B4_vec_vsububm,
+ &B5_vec_vsububm,
+ &B6_vec_vsububm,
+};
+static const struct builtin *const O_vec_vsububs[3] = {
+ &B1_vec_vsububs,
+ &B2_vec_vsububs,
+ &B3_vec_vsububs,
+};
+static const struct builtin *const O_vec_vsubuhm[6] = {
+ &B1_vec_vsubuhm,
+ &B2_vec_vsubuhm,
+ &B3_vec_vsubuhm,
+ &B4_vec_vsubuhm,
+ &B5_vec_vsubuhm,
+ &B6_vec_vsubuhm,
+};
+static const struct builtin *const O_vec_vsubuhs[3] = {
+ &B1_vec_vsubuhs,
+ &B2_vec_vsubuhs,
+ &B3_vec_vsubuhs,
+};
+static const struct builtin *const O_vec_vsubuwm[6] = {
+ &B1_vec_vsubuwm,
+ &B2_vec_vsubuwm,
+ &B3_vec_vsubuwm,
+ &B4_vec_vsubuwm,
+ &B5_vec_vsubuwm,
+ &B6_vec_vsubuwm,
+};
+static const struct builtin *const O_vec_vsubuws[3] = {
+ &B1_vec_vsubuws,
+ &B2_vec_vsubuws,
+ &B3_vec_vsubuws,
+};
+static const struct builtin *const O_vec_vsum2sws[1] = {
+ &B_vec_vsum2sws,
+};
+static const struct builtin *const O_vec_vsum4sbs[1] = {
+ &B_vec_vsum4sbs,
+};
+static const struct builtin *const O_vec_vsum4shs[1] = {
+ &B_vec_vsum4shs,
+};
+static const struct builtin *const O_vec_vsum4ubs[1] = {
+ &B_vec_vsum4ubs,
+};
+static const struct builtin *const O_vec_vsumsws[1] = {
+ &B_vec_vsumsws,
+};
+static const struct builtin *const O_vec_vupkhpx[1] = {
+ &B_vec_vupkhpx,
+};
+static const struct builtin *const O_vec_vupkhsb[2] = {
+ &B1_vec_vupkhsb,
+ &B2_vec_vupkhsb,
+};
+static const struct builtin *const O_vec_vupkhsh[2] = {
+ &B1_vec_vupkhsh,
+ &B2_vec_vupkhsh,
+};
+static const struct builtin *const O_vec_vupklpx[1] = {
+ &B_vec_vupklpx,
+};
+static const struct builtin *const O_vec_vupklsb[2] = {
+ &B1_vec_vupklsb,
+ &B2_vec_vupklsb,
+};
+static const struct builtin *const O_vec_vupklsh[2] = {
+ &B1_vec_vupklsh,
+ &B2_vec_vupklsh,
+};
+static const struct builtin *const O_vec_vxor[24] = {
+ &B1_vec_vxor,
+ &B2_vec_vxor,
+ &B3_vec_vxor,
+ &B4_vec_vxor,
+ &B5_vec_vxor,
+ &B6_vec_vxor,
+ &B7_vec_vxor,
+ &B8_vec_vxor,
+ &B9_vec_vxor,
+ &B10_vec_vxor,
+ &B11_vec_vxor,
+ &B12_vec_vxor,
+ &B13_vec_vxor,
+ &B14_vec_vxor,
+ &B15_vec_vxor,
+ &B16_vec_vxor,
+ &B17_vec_vxor,
+ &B18_vec_vxor,
+ &B19_vec_vxor,
+ &B20_vec_vxor,
+ &B21_vec_vxor,
+ &B22_vec_vxor,
+ &B23_vec_vxor,
+ &B24_vec_vxor,
+};
+static const struct builtin *const O_vec_xor[24] = {
+ &B1_vec_vxor,
+ &B2_vec_vxor,
+ &B3_vec_vxor,
+ &B4_vec_vxor,
+ &B5_vec_vxor,
+ &B6_vec_vxor,
+ &B7_vec_vxor,
+ &B8_vec_vxor,
+ &B9_vec_vxor,
+ &B10_vec_vxor,
+ &B11_vec_vxor,
+ &B12_vec_vxor,
+ &B13_vec_vxor,
+ &B14_vec_vxor,
+ &B15_vec_vxor,
+ &B16_vec_vxor,
+ &B17_vec_vxor,
+ &B18_vec_vxor,
+ &B19_vec_vxor,
+ &B20_vec_vxor,
+ &B21_vec_vxor,
+ &B22_vec_vxor,
+ &B23_vec_vxor,
+ &B24_vec_vxor,
+};
+
+const struct overloadx Overload[] = {
+ { "vec_abs", 4, 1, O_vec_abs, O_UID(0) },
+ { "vec_abss", 3, 1, O_vec_abss, O_UID(1) },
+ { "vec_add", 19, 2, O_vec_add, O_UID(2) },
+ { "vec_addc", 1, 2, O_vec_addc, O_UID(3) },
+ { "vec_adds", 18, 2, O_vec_adds, O_UID(4) },
+ { "vec_all_eq", 23, 2, O_vec_all_eq, O_UID(5) },
+ { "vec_all_ge", 19, 2, O_vec_all_ge, O_UID(6) },
+ { "vec_all_gt", 19, 2, O_vec_all_gt, O_UID(7) },
+ { "vec_all_in", 1, 2, O_vec_all_in, O_UID(8) },
+ { "vec_all_le", 19, 2, O_vec_all_le, O_UID(9) },
+ { "vec_all_lt", 19, 2, O_vec_all_lt, O_UID(10) },
+ { "vec_all_nan", 1, 1, O_vec_all_nan, O_UID(11) },
+ { "vec_all_ne", 23, 2, O_vec_all_ne, O_UID(12) },
+ { "vec_all_nge", 1, 2, O_vec_all_nge, O_UID(13) },
+ { "vec_all_ngt", 1, 2, O_vec_all_ngt, O_UID(14) },
+ { "vec_all_nle", 1, 2, O_vec_all_nle, O_UID(15) },
+ { "vec_all_nlt", 1, 2, O_vec_all_nlt, O_UID(16) },
+ { "vec_all_numeric", 1, 1, O_vec_all_numeric, O_UID(17) },
+ { "vec_and", 24, 2, O_vec_and, O_UID(18) },
+ { "vec_andc", 24, 2, O_vec_andc, O_UID(19) },
+ { "vec_any_eq", 23, 2, O_vec_any_eq, O_UID(20) },
+ { "vec_any_ge", 19, 2, O_vec_any_ge, O_UID(21) },
+ { "vec_any_gt", 19, 2, O_vec_any_gt, O_UID(22) },
+ { "vec_any_le", 19, 2, O_vec_any_le, O_UID(23) },
+ { "vec_any_lt", 19, 2, O_vec_any_lt, O_UID(24) },
+ { "vec_any_nan", 1, 1, O_vec_any_nan, O_UID(25) },
+ { "vec_any_ne", 23, 2, O_vec_any_ne, O_UID(26) },
+ { "vec_any_nge", 1, 2, O_vec_any_nge, O_UID(27) },
+ { "vec_any_ngt", 1, 2, O_vec_any_ngt, O_UID(28) },
+ { "vec_any_nle", 1, 2, O_vec_any_nle, O_UID(29) },
+ { "vec_any_nlt", 1, 2, O_vec_any_nlt, O_UID(30) },
+ { "vec_any_numeric", 1, 1, O_vec_any_numeric, O_UID(31) },
+ { "vec_any_out", 1, 2, O_vec_any_out, O_UID(32) },
+ { "vec_avg", 6, 2, O_vec_avg, O_UID(33) },
+ { "vec_ceil", 1, 1, O_vec_ceil, O_UID(34) },
+ { "vec_cmpb", 1, 2, O_vec_cmpb, O_UID(35) },
+ { "vec_cmpeq", 7, 2, O_vec_cmpeq, O_UID(36) },
+ { "vec_cmpge", 1, 2, O_vec_cmpge, O_UID(37) },
+ { "vec_cmpgt", 7, 2, O_vec_cmpgt, O_UID(38) },
+ { "vec_cmple", 1, 2, O_vec_cmple, O_UID(39) },
+ { "vec_cmplt", 7, 2, O_vec_cmplt, O_UID(40) },
+ { "vec_ctf", 2, 2, O_vec_ctf, O_UID(41) },
+ { "vec_cts", 1, 2, O_vec_cts, O_UID(42) },
+ { "vec_ctu", 1, 2, O_vec_ctu, O_UID(43) },
+ { "vec_dss", 1, 1, O_vec_dss, O_UID(44) },
+ { "vec_dssall", 1, 0, O_vec_dssall, O_UID(45) },
+ { "vec_dst", 20, 3, O_vec_dst, O_UID(46) },
+ { "vec_dstst", 20, 3, O_vec_dstst, O_UID(47) },
+ { "vec_dststt", 20, 3, O_vec_dststt, O_UID(48) },
+ { "vec_dstt", 20, 3, O_vec_dstt, O_UID(49) },
+ { "vec_expte", 1, 1, O_vec_expte, O_UID(50) },
+ { "vec_floor", 1, 1, O_vec_floor, O_UID(51) },
+ { "vec_ld", 20, 2, O_vec_ld, O_UID(52) },
+ { "vec_lde", 9, 2, O_vec_lde, O_UID(53) },
+ { "vec_ldl", 20, 2, O_vec_ldl, O_UID(54) },
+ { "vec_loge", 1, 1, O_vec_loge, O_UID(55) },
+ { "vec_lvebx", 2, 2, O_vec_lvebx, O_UID(56) },
+ { "vec_lvehx", 2, 2, O_vec_lvehx, O_UID(57) },
+ { "vec_lvewx", 5, 2, O_vec_lvewx, O_UID(58) },
+ { "vec_lvsl", 9, 2, O_vec_lvsl, O_UID(59) },
+ { "vec_lvsr", 9, 2, O_vec_lvsr, O_UID(60) },
+ { "vec_lvx", 20, 2, O_vec_lvx, O_UID(61) },
+ { "vec_lvxl", 20, 2, O_vec_lvxl, O_UID(62) },
+ { "vec_madd", 1, 3, O_vec_madd, O_UID(63) },
+ { "vec_madds", 1, 3, O_vec_madds, O_UID(64) },
+ { "vec_max", 19, 2, O_vec_max, O_UID(65) },
+ { "vec_mergeh", 11, 2, O_vec_mergeh, O_UID(66) },
+ { "vec_mergel", 11, 2, O_vec_mergel, O_UID(67) },
+ { "vec_mfvscr", 1, 0, O_vec_mfvscr, O_UID(68) },
+ { "vec_min", 19, 2, O_vec_min, O_UID(69) },
+ { "vec_mladd", 4, 3, O_vec_mladd, O_UID(70) },
+ { "vec_mradds", 1, 3, O_vec_mradds, O_UID(71) },
+ { "vec_msum", 4, 3, O_vec_msum, O_UID(72) },
+ { "vec_msums", 2, 3, O_vec_msums, O_UID(73) },
+ { "vec_mtvscr", 10, 1, O_vec_mtvscr, O_UID(74) },
+ { "vec_mule", 4, 2, O_vec_mule, O_UID(75) },
+ { "vec_mulo", 4, 2, O_vec_mulo, O_UID(76) },
+ { "vec_nmsub", 1, 3, O_vec_nmsub, O_UID(77) },
+ { "vec_nor", 10, 2, O_vec_nor, O_UID(78) },
+ { "vec_or", 24, 2, O_vec_or, O_UID(79) },
+ { "vec_pack", 6, 2, O_vec_pack, O_UID(80) },
+ { "vec_packpx", 1, 2, O_vec_packpx, O_UID(81) },
+ { "vec_packs", 4, 2, O_vec_packs, O_UID(82) },
+ { "vec_packsu", 4, 2, O_vec_packsu, O_UID(83) },
+ { "vec_perm", 11, 3, O_vec_perm, O_UID(84) },
+ { "vec_re", 1, 1, O_vec_re, O_UID(85) },
+ { "vec_rl", 6, 2, O_vec_rl, O_UID(86) },
+ { "vec_round", 1, 1, O_vec_round, O_UID(87) },
+ { "vec_rsqrte", 1, 1, O_vec_rsqrte, O_UID(88) },
+ { "vec_sel", 20, 3, O_vec_sel, O_UID(89) },
+ { "vec_sl", 6, 2, O_vec_sl, O_UID(90) },
+ { "vec_sld", 11, 3, O_vec_sld, O_UID(91) },
+ { "vec_sll", 30, 2, O_vec_sll, O_UID(92) },
+ { "vec_slo", 16, 2, O_vec_slo, O_UID(93) },
+ { "vec_splat", 11, 2, O_vec_splat, O_UID(94) },
+ { "vec_splat_s16", 1, 1, O_vec_splat_s16, O_UID(95) },
+ { "vec_splat_s32", 1, 1, O_vec_splat_s32, O_UID(96) },
+ { "vec_splat_s8", 1, 1, O_vec_splat_s8, O_UID(97) },
+ { "vec_splat_u16", 1, 1, O_vec_splat_u16, O_UID(98) },
+ { "vec_splat_u32", 1, 1, O_vec_splat_u32, O_UID(99) },
+ { "vec_splat_u8", 1, 1, O_vec_splat_u8, O_UID(100) },
+ { "vec_sr", 6, 2, O_vec_sr, O_UID(101) },
+ { "vec_sra", 6, 2, O_vec_sra, O_UID(102) },
+ { "vec_srl", 30, 2, O_vec_srl, O_UID(103) },
+ { "vec_sro", 16, 2, O_vec_sro, O_UID(104) },
+ { "vec_st", 30, 3, O_vec_st, O_UID(105) },
+ { "vec_ste", 19, 3, O_vec_ste, O_UID(106) },
+ { "vec_stl", 30, 3, O_vec_stl, O_UID(107) },
+ { "vec_stvebx", 6, 3, O_vec_stvebx, O_UID(108) },
+ { "vec_stvehx", 4, 3, O_vec_stvehx, O_UID(109) },
+ { "vec_stvewx", 9, 3, O_vec_stvewx, O_UID(110) },
+ { "vec_stvx", 30, 3, O_vec_stvx, O_UID(111) },
+ { "vec_stvxl", 30, 3, O_vec_stvxl, O_UID(112) },
+ { "vec_sub", 19, 2, O_vec_sub, O_UID(113) },
+ { "vec_subc", 1, 2, O_vec_subc, O_UID(114) },
+ { "vec_subs", 18, 2, O_vec_subs, O_UID(115) },
+ { "vec_sum2s", 1, 2, O_vec_sum2s, O_UID(116) },
+ { "vec_sum4s", 3, 2, O_vec_sum4s, O_UID(117) },
+ { "vec_sums", 1, 2, O_vec_sums, O_UID(118) },
+ { "vec_trunc", 1, 1, O_vec_trunc, O_UID(119) },
+ { "vec_unpack2sh", 2, 2, O_vec_unpack2sh, O_UID(120) },
+ { "vec_unpack2sl", 2, 2, O_vec_unpack2sl, O_UID(121) },
+ { "vec_unpack2uh", 2, 2, O_vec_unpack2uh, O_UID(122) },
+ { "vec_unpack2ul", 2, 2, O_vec_unpack2ul, O_UID(123) },
+ { "vec_unpackh", 5, 1, O_vec_unpackh, O_UID(124) },
+ { "vec_unpackl", 5, 1, O_vec_unpackl, O_UID(125) },
+ { "vec_vaddcuw", 1, 2, O_vec_vaddcuw, O_UID(126) },
+ { "vec_vaddfp", 1, 2, O_vec_vaddfp, O_UID(127) },
+ { "vec_vaddsbs", 3, 2, O_vec_vaddsbs, O_UID(128) },
+ { "vec_vaddshs", 3, 2, O_vec_vaddshs, O_UID(129) },
+ { "vec_vaddsws", 3, 2, O_vec_vaddsws, O_UID(130) },
+ { "vec_vaddubm", 6, 2, O_vec_vaddubm, O_UID(131) },
+ { "vec_vaddubs", 3, 2, O_vec_vaddubs, O_UID(132) },
+ { "vec_vadduhm", 6, 2, O_vec_vadduhm, O_UID(133) },
+ { "vec_vadduhs", 3, 2, O_vec_vadduhs, O_UID(134) },
+ { "vec_vadduwm", 6, 2, O_vec_vadduwm, O_UID(135) },
+ { "vec_vadduws", 3, 2, O_vec_vadduws, O_UID(136) },
+ { "vec_vand", 24, 2, O_vec_vand, O_UID(137) },
+ { "vec_vandc", 24, 2, O_vec_vandc, O_UID(138) },
+ { "vec_vavgsb", 1, 2, O_vec_vavgsb, O_UID(139) },
+ { "vec_vavgsh", 1, 2, O_vec_vavgsh, O_UID(140) },
+ { "vec_vavgsw", 1, 2, O_vec_vavgsw, O_UID(141) },
+ { "vec_vavgub", 1, 2, O_vec_vavgub, O_UID(142) },
+ { "vec_vavguh", 1, 2, O_vec_vavguh, O_UID(143) },
+ { "vec_vavguw", 1, 2, O_vec_vavguw, O_UID(144) },
+ { "vec_vcfsx", 1, 2, O_vec_vcfsx, O_UID(145) },
+ { "vec_vcfux", 1, 2, O_vec_vcfux, O_UID(146) },
+ { "vec_vcmpbfp", 1, 2, O_vec_vcmpbfp, O_UID(147) },
+ { "vec_vcmpeqfp", 1, 2, O_vec_vcmpeqfp, O_UID(148) },
+ { "vec_vcmpequb", 2, 2, O_vec_vcmpequb, O_UID(149) },
+ { "vec_vcmpequh", 2, 2, O_vec_vcmpequh, O_UID(150) },
+ { "vec_vcmpequw", 2, 2, O_vec_vcmpequw, O_UID(151) },
+ { "vec_vcmpgefp", 1, 2, O_vec_vcmpgefp, O_UID(152) },
+ { "vec_vcmpgtfp", 1, 2, O_vec_vcmpgtfp, O_UID(153) },
+ { "vec_vcmpgtsb", 1, 2, O_vec_vcmpgtsb, O_UID(154) },
+ { "vec_vcmpgtsh", 1, 2, O_vec_vcmpgtsh, O_UID(155) },
+ { "vec_vcmpgtsw", 1, 2, O_vec_vcmpgtsw, O_UID(156) },
+ { "vec_vcmpgtub", 1, 2, O_vec_vcmpgtub, O_UID(157) },
+ { "vec_vcmpgtuh", 1, 2, O_vec_vcmpgtuh, O_UID(158) },
+ { "vec_vcmpgtuw", 1, 2, O_vec_vcmpgtuw, O_UID(159) },
+ { "vec_vctsxs", 1, 2, O_vec_vctsxs, O_UID(160) },
+ { "vec_vctuxs", 1, 2, O_vec_vctuxs, O_UID(161) },
+ { "vec_vexptefp", 1, 1, O_vec_vexptefp, O_UID(162) },
+ { "vec_vlogefp", 1, 1, O_vec_vlogefp, O_UID(163) },
+ { "vec_vmaddfp", 1, 3, O_vec_vmaddfp, O_UID(164) },
+ { "vec_vmaxfp", 1, 2, O_vec_vmaxfp, O_UID(165) },
+ { "vec_vmaxsb", 3, 2, O_vec_vmaxsb, O_UID(166) },
+ { "vec_vmaxsh", 3, 2, O_vec_vmaxsh, O_UID(167) },
+ { "vec_vmaxsw", 3, 2, O_vec_vmaxsw, O_UID(168) },
+ { "vec_vmaxub", 3, 2, O_vec_vmaxub, O_UID(169) },
+ { "vec_vmaxuh", 3, 2, O_vec_vmaxuh, O_UID(170) },
+ { "vec_vmaxuw", 3, 2, O_vec_vmaxuw, O_UID(171) },
+ { "vec_vmhaddshs", 1, 3, O_vec_vmhaddshs, O_UID(172) },
+ { "vec_vmhraddshs", 1, 3, O_vec_vmhraddshs, O_UID(173) },
+ { "vec_vminfp", 1, 2, O_vec_vminfp, O_UID(174) },
+ { "vec_vminsb", 3, 2, O_vec_vminsb, O_UID(175) },
+ { "vec_vminsh", 3, 2, O_vec_vminsh, O_UID(176) },
+ { "vec_vminsw", 3, 2, O_vec_vminsw, O_UID(177) },
+ { "vec_vminub", 3, 2, O_vec_vminub, O_UID(178) },
+ { "vec_vminuh", 3, 2, O_vec_vminuh, O_UID(179) },
+ { "vec_vminuw", 3, 2, O_vec_vminuw, O_UID(180) },
+ { "vec_vmladduhm", 4, 3, O_vec_vmladduhm, O_UID(181) },
+ { "vec_vmrghb", 3, 2, O_vec_vmrghb, O_UID(182) },
+ { "vec_vmrghh", 4, 2, O_vec_vmrghh, O_UID(183) },
+ { "vec_vmrghw", 4, 2, O_vec_vmrghw, O_UID(184) },
+ { "vec_vmrglb", 3, 2, O_vec_vmrglb, O_UID(185) },
+ { "vec_vmrglh", 4, 2, O_vec_vmrglh, O_UID(186) },
+ { "vec_vmrglw", 4, 2, O_vec_vmrglw, O_UID(187) },
+ { "vec_vmsummbm", 1, 3, O_vec_vmsummbm, O_UID(188) },
+ { "vec_vmsumshm", 1, 3, O_vec_vmsumshm, O_UID(189) },
+ { "vec_vmsumshs", 1, 3, O_vec_vmsumshs, O_UID(190) },
+ { "vec_vmsumubm", 1, 3, O_vec_vmsumubm, O_UID(191) },
+ { "vec_vmsumuhm", 1, 3, O_vec_vmsumuhm, O_UID(192) },
+ { "vec_vmsumuhs", 1, 3, O_vec_vmsumuhs, O_UID(193) },
+ { "vec_vmulesb", 1, 2, O_vec_vmulesb, O_UID(194) },
+ { "vec_vmulesh", 1, 2, O_vec_vmulesh, O_UID(195) },
+ { "vec_vmuleub", 1, 2, O_vec_vmuleub, O_UID(196) },
+ { "vec_vmuleuh", 1, 2, O_vec_vmuleuh, O_UID(197) },
+ { "vec_vmulosb", 1, 2, O_vec_vmulosb, O_UID(198) },
+ { "vec_vmulosh", 1, 2, O_vec_vmulosh, O_UID(199) },
+ { "vec_vmuloub", 1, 2, O_vec_vmuloub, O_UID(200) },
+ { "vec_vmulouh", 1, 2, O_vec_vmulouh, O_UID(201) },
+ { "vec_vnmsubfp", 1, 3, O_vec_vnmsubfp, O_UID(202) },
+ { "vec_vnor", 10, 2, O_vec_vnor, O_UID(203) },
+ { "vec_vor", 24, 2, O_vec_vor, O_UID(204) },
+ { "vec_vperm", 11, 3, O_vec_vperm, O_UID(205) },
+ { "vec_vpkpx", 1, 2, O_vec_vpkpx, O_UID(206) },
+ { "vec_vpkshss", 1, 2, O_vec_vpkshss, O_UID(207) },
+ { "vec_vpkshus", 1, 2, O_vec_vpkshus, O_UID(208) },
+ { "vec_vpkswss", 1, 2, O_vec_vpkswss, O_UID(209) },
+ { "vec_vpkswus", 1, 2, O_vec_vpkswus, O_UID(210) },
+ { "vec_vpkuhum", 3, 2, O_vec_vpkuhum, O_UID(211) },
+ { "vec_vpkuhus", 1, 2, O_vec_vpkuhus, O_UID(212) },
+ { "vec_vpkuwum", 3, 2, O_vec_vpkuwum, O_UID(213) },
+ { "vec_vpkuwus", 1, 2, O_vec_vpkuwus, O_UID(214) },
+ { "vec_vrefp", 1, 1, O_vec_vrefp, O_UID(215) },
+ { "vec_vrfim", 1, 1, O_vec_vrfim, O_UID(216) },
+ { "vec_vrfin", 1, 1, O_vec_vrfin, O_UID(217) },
+ { "vec_vrfip", 1, 1, O_vec_vrfip, O_UID(218) },
+ { "vec_vrfiz", 1, 1, O_vec_vrfiz, O_UID(219) },
+ { "vec_vrlb", 2, 2, O_vec_vrlb, O_UID(220) },
+ { "vec_vrlh", 2, 2, O_vec_vrlh, O_UID(221) },
+ { "vec_vrlw", 2, 2, O_vec_vrlw, O_UID(222) },
+ { "vec_vrsqrtefp", 1, 1, O_vec_vrsqrtefp, O_UID(223) },
+ { "vec_vsel", 20, 3, O_vec_vsel, O_UID(224) },
+ { "vec_vsl", 30, 2, O_vec_vsl, O_UID(225) },
+ { "vec_vslb", 2, 2, O_vec_vslb, O_UID(226) },
+ { "vec_vsldoi", 11, 3, O_vec_vsldoi, O_UID(227) },
+ { "vec_vslh", 2, 2, O_vec_vslh, O_UID(228) },
+ { "vec_vslo", 16, 2, O_vec_vslo, O_UID(229) },
+ { "vec_vslw", 2, 2, O_vec_vslw, O_UID(230) },
+ { "vec_vspltb", 3, 2, O_vec_vspltb, O_UID(231) },
+ { "vec_vsplth", 4, 2, O_vec_vsplth, O_UID(232) },
+ { "vec_vspltisb", 1, 1, O_vec_vspltisb, O_UID(233) },
+ { "vec_vspltish", 1, 1, O_vec_vspltish, O_UID(234) },
+ { "vec_vspltisw", 1, 1, O_vec_vspltisw, O_UID(235) },
+ { "vec_vspltw", 4, 2, O_vec_vspltw, O_UID(236) },
+ { "vec_vsr", 30, 2, O_vec_vsr, O_UID(237) },
+ { "vec_vsrab", 2, 2, O_vec_vsrab, O_UID(238) },
+ { "vec_vsrah", 2, 2, O_vec_vsrah, O_UID(239) },
+ { "vec_vsraw", 2, 2, O_vec_vsraw, O_UID(240) },
+ { "vec_vsrb", 2, 2, O_vec_vsrb, O_UID(241) },
+ { "vec_vsrh", 2, 2, O_vec_vsrh, O_UID(242) },
+ { "vec_vsro", 16, 2, O_vec_vsro, O_UID(243) },
+ { "vec_vsrw", 2, 2, O_vec_vsrw, O_UID(244) },
+ { "vec_vsubcuw", 1, 2, O_vec_vsubcuw, O_UID(245) },
+ { "vec_vsubfp", 1, 2, O_vec_vsubfp, O_UID(246) },
+ { "vec_vsubsbs", 3, 2, O_vec_vsubsbs, O_UID(247) },
+ { "vec_vsubshs", 3, 2, O_vec_vsubshs, O_UID(248) },
+ { "vec_vsubsws", 3, 2, O_vec_vsubsws, O_UID(249) },
+ { "vec_vsububm", 6, 2, O_vec_vsububm, O_UID(250) },
+ { "vec_vsububs", 3, 2, O_vec_vsububs, O_UID(251) },
+ { "vec_vsubuhm", 6, 2, O_vec_vsubuhm, O_UID(252) },
+ { "vec_vsubuhs", 3, 2, O_vec_vsubuhs, O_UID(253) },
+ { "vec_vsubuwm", 6, 2, O_vec_vsubuwm, O_UID(254) },
+ { "vec_vsubuws", 3, 2, O_vec_vsubuws, O_UID(255) },
+ { "vec_vsum2sws", 1, 2, O_vec_vsum2sws, O_UID(256) },
+ { "vec_vsum4sbs", 1, 2, O_vec_vsum4sbs, O_UID(257) },
+ { "vec_vsum4shs", 1, 2, O_vec_vsum4shs, O_UID(258) },
+ { "vec_vsum4ubs", 1, 2, O_vec_vsum4ubs, O_UID(259) },
+ { "vec_vsumsws", 1, 2, O_vec_vsumsws, O_UID(260) },
+ { "vec_vupkhpx", 1, 1, O_vec_vupkhpx, O_UID(261) },
+ { "vec_vupkhsb", 2, 1, O_vec_vupkhsb, O_UID(262) },
+ { "vec_vupkhsh", 2, 1, O_vec_vupkhsh, O_UID(263) },
+ { "vec_vupklpx", 1, 1, O_vec_vupklpx, O_UID(264) },
+ { "vec_vupklsb", 2, 1, O_vec_vupklsb, O_UID(265) },
+ { "vec_vupklsh", 2, 1, O_vec_vupklsh, O_UID(266) },
+ { "vec_vxor", 24, 2, O_vec_vxor, O_UID(267) },
+ { "vec_xor", 24, 2, O_vec_xor, O_UID(268) },
+ { NULL, 0, 0, NULL, 0 }
+};
+#define LAST_O_UID O_UID(269)
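
The generated Overload[] table above is keyed by intrinsic name and, per the diffstat, is consumed by the AltiVec overload-resolution support added to rs6000-c.c. As an illustrative sketch only (not part of this patch), the lookup could be a binary search, since the generator emits the entries in alphabetical order with a NULL-named sentinel at the end; the field names below are assumptions read off the initializers shown (name, candidate count, argument count, candidate array, unique id).

/* Hypothetical sketch, not the patch's actual resolver: look up an
   overloaded AltiVec intrinsic by name in the generated Overload[] table.  */

#include <string.h>

struct builtin;                        /* opaque here; emitted by ops-to-gp */

struct overloadx
{
  const char *name;                    /* e.g. "vec_add" */
  int n_fcns;                          /* entries in the candidate array */
  int n_args;                          /* arguments the intrinsic takes */
  const struct builtin *const *fcns;   /* candidates, e.g. O_vec_add */
  int uid;                             /* O_UID(n) */
};

extern const struct overloadx Overload[];   /* the generated table above */

/* Binary search by name; assumes the table is sorted and the last real
   entry is followed by a { NULL, 0, 0, NULL, 0 } sentinel.  */
static const struct overloadx *
lookup_overload (const char *name, int table_len)
{
  int lo = 0, hi = table_len - 1;
  while (lo <= hi)
    {
      int mid = (lo + hi) / 2;
      int cmp = strcmp (name, Overload[mid].name);
      if (cmp == 0)
	return &Overload[mid];
      else if (cmp < 0)
	hi = mid - 1;
      else
	lo = mid + 1;
    }
  return NULL;
}

Argument-type matching against the returned candidate list would then pick one B*_vec_* builtin; that step depends on type encodings not visible in this hunk, so it is omitted here.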
diff --git a/gcc/config/rs6000/vec.ops b/gcc/config/rs6000/vec.ops
new file mode 100644
index 00000000000..5ef80a2d6b8
--- /dev/null
+++ b/gcc/config/rs6000/vec.ops
@@ -0,0 +1,1025 @@
+# APPLE LOCAL file AltiVec
+# ops-to-gp -gcc vec.ops builtin.ops
+vec_abs vec_s8 = vec_s8 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 1 FALSE FALSE transform_vec_abs
+vec_abs vec_s16 = vec_s16 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 2 FALSE FALSE transform_vec_abs
+vec_abs vec_s32 = vec_s32 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 3 FALSE FALSE transform_vec_abs
+vec_abs vec_f32 = vec_f32 vec_abs BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 4 FALSE FALSE transform_vec_abs
+vec_abss vec_s8 = vec_s8 vec_abss BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 5 FALSE FALSE transform_vec_abs
+vec_abss vec_s16 = vec_s16 vec_abss BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 6 FALSE FALSE transform_vec_abs
+vec_abss vec_s32 = vec_s32 vec_abss BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE 7 FALSE FALSE transform_vec_abs
+vec_cmplt vec_u8 vec_u8 = vec_b8 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtub FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_u16 vec_u16 = vec_b16 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuh FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_u32 vec_u32 = vec_b32 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuw FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_s8 vec_s8 = vec_b8 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsb FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_s16 vec_s16 = vec_b16 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsh FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_s32 vec_s32 = vec_b32 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsw FALSE FALSE transform_vec_cmp_reverse
+vec_cmplt vec_f32 vec_f32 = vec_b32 vec_cmplt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfp FALSE FALSE transform_vec_cmp_reverse
+vec_cmple vec_f32 vec_f32 = vec_b32 vec_cmple BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefp FALSE FALSE transform_vec_cmp_reverse
+vec_add vec_s8 vec_s8 = vec_s8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s8 vec_b8 = vec_s8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b8 vec_s8 = vec_s8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s16 vec_s16 = vec_s16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s16 vec_b16 = vec_s16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b16 vec_s16 = vec_s16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s32 vec_s32 = vec_s32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_s32 vec_b32 = vec_s32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b32 vec_s32 = vec_s32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u8 vec_u8 = vec_u8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u8 vec_b8 = vec_u8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b8 vec_u8 = vec_u8 vec_vaddubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u16 vec_u16 = vec_u16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u16 vec_b16 = vec_u16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b16 vec_u16 = vec_u16 vec_vadduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u32 vec_u32 = vec_u32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_u32 vec_b32 = vec_u32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_b32 vec_u32 = vec_u32 vec_vadduwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s8 vec_s8 = vec_s8 vec_vaddsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s8 vec_b8 = vec_s8 vec_vaddsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b8 vec_s8 = vec_s8 vec_vaddsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s16 vec_s16 = vec_s16 vec_vaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s16 vec_b16 = vec_s16 vec_vaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b16 vec_s16 = vec_s16 vec_vaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s32 vec_s32 = vec_s32 vec_vaddsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_s32 vec_b32 = vec_s32 vec_vaddsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b32 vec_s32 = vec_s32 vec_vaddsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u8 vec_u8 = vec_u8 vec_vaddubs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u8 vec_b8 = vec_u8 vec_vaddubs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b8 vec_u8 = vec_u8 vec_vaddubs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u16 vec_u16 = vec_u16 vec_vadduhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u16 vec_b16 = vec_u16 vec_vadduhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b16 vec_u16 = vec_u16 vec_vadduhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u32 vec_u32 = vec_u32 vec_vadduws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_u32 vec_b32 = vec_u32 vec_vadduws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_adds vec_b32 vec_u32 = vec_u32 vec_vadduws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s8 vec_s8 = vec_s8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s8 vec_b8 = vec_s8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b8 vec_s8 = vec_s8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s16 vec_s16 = vec_s16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s16 vec_b16 = vec_s16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b16 vec_s16 = vec_s16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s32 vec_s32 = vec_s32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_s32 vec_b32 = vec_s32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b32 vec_s32 = vec_s32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u8 vec_u8 = vec_u8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u8 vec_b8 = vec_u8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b8 vec_u8 = vec_u8 vec_vsububm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u16 vec_u16 = vec_u16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u16 vec_b16 = vec_u16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b16 vec_u16 = vec_u16 vec_vsubuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u32 vec_u32 = vec_u32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_u32 vec_b32 = vec_u32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_b32 vec_u32 = vec_u32 vec_vsubuwm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s8 vec_s8 = vec_s8 vec_vsubsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s8 vec_b8 = vec_s8 vec_vsubsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b8 vec_s8 = vec_s8 vec_vsubsbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s16 vec_s16 = vec_s16 vec_vsubshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s16 vec_b16 = vec_s16 vec_vsubshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b16 vec_s16 = vec_s16 vec_vsubshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s32 vec_s32 = vec_s32 vec_vsubsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_s32 vec_b32 = vec_s32 vec_vsubsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b32 vec_s32 = vec_s32 vec_vsubsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u8 vec_u8 = vec_u8 vec_vsububs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u8 vec_b8 = vec_u8 vec_vsububs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b8 vec_u8 = vec_u8 vec_vsububs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u16 vec_u16 = vec_u16 vec_vsubuhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u16 vec_b16 = vec_u16 vec_vsubuhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b16 vec_u16 = vec_u16 vec_vsubuhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u32 vec_u32 = vec_u32 vec_vsubuws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_u32 vec_b32 = vec_u32 vec_vsubuws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subs vec_b32 vec_u32 = vec_u32 vec_vsubuws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_addc vec_u32 vec_u32 = vec_u32 vec_vaddcuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_subc vec_u32 vec_u32 = vec_u32 vec_vsubcuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mulo vec_u8 vec_u8 = vec_u16 vec_vmuloub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mulo vec_u16 vec_u16 = vec_u32 vec_vmulouh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mulo vec_s8 vec_s8 = vec_s16 vec_vmulosb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mulo vec_s16 vec_s16 = vec_s32 vec_vmulosh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mule vec_u8 vec_u8 = vec_u16 vec_vmuleub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mule vec_u16 vec_u16 = vec_u32 vec_vmuleuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mule vec_s8 vec_s8 = vec_s16 vec_vmulesb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mule vec_s16 vec_s16 = vec_s32 vec_vmulesh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mladd vec_s16 vec_s16 vec_s16 = vec_s16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mladd vec_u16 vec_u16 vec_u16 = vec_u16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mladd vec_s16 vec_u16 vec_u16 = vec_s16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mladd vec_u16 vec_s16 vec_s16 = vec_s16 vec_vmladduhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_madds vec_s16 vec_s16 vec_s16 = vec_s16 vec_vmhaddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mradds vec_s16 vec_s16 vec_s16 = vec_s16 vec_vmhraddshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msum vec_s8 vec_u8 vec_s32 = vec_s32 vec_vmsummbm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msum vec_u8 vec_u8 vec_u32 = vec_u32 vec_vmsumubm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msum vec_s16 vec_s16 vec_s32 = vec_s32 vec_vmsumshm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msum vec_u16 vec_u16 vec_u32 = vec_u32 vec_vmsumuhm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msums vec_s16 vec_s16 vec_s32 = vec_s32 vec_vmsumshs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_msums vec_u16 vec_u16 vec_u32 = vec_u32 vec_vmsumuhs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sums vec_s32 vec_s32 = vec_s32 vec_vsumsws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sum2s vec_s32 vec_s32 = vec_s32 vec_vsum2sws BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sum4s vec_s8 vec_s32 = vec_s32 vec_vsum4sbs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sum4s vec_u8 vec_u32 = vec_u32 vec_vsum4ubs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sum4s vec_s16 vec_s32 = vec_s32 vec_vsum4shs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_s8 vec_s8 = vec_s8 vec_vavgsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_s16 vec_s16 = vec_s16 vec_vavgsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_u8 vec_u8 = vec_u8 vec_vavgub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_u16 vec_u16 = vec_u16 vec_vavguh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_s32 vec_s32 = vec_s32 vec_vavgsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_avg vec_u32 vec_u32 = vec_u32 vec_vavguw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s8 vec_s8 = vec_s8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s8 vec_b8 = vec_s8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b8 vec_s8 = vec_s8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u8 vec_u8 = vec_u8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b8 vec_u8 = vec_u8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u8 vec_b8 = vec_u8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b8 vec_b8 = vec_b8 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s16 vec_s16 = vec_s16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s16 vec_b16 = vec_s16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b16 vec_s16 = vec_s16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u16 vec_u16 = vec_u16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b16 vec_u16 = vec_u16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u16 vec_b16 = vec_u16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b16 vec_b16 = vec_b16 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s32 vec_s32 = vec_s32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_s32 vec_b32 = vec_s32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b32 vec_s32 = vec_s32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u32 vec_u32 = vec_u32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b32 vec_u32 = vec_u32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_u32 vec_b32 = vec_u32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b32 vec_b32 = vec_b32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_f32 vec_f32 = vec_f32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_f32 vec_b32 = vec_f32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_and vec_b32 vec_f32 = vec_f32 vec_vand BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s8 vec_s8 = vec_s8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s8 vec_b8 = vec_s8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b8 vec_s8 = vec_s8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u8 vec_u8 = vec_u8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b8 vec_u8 = vec_u8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u8 vec_b8 = vec_u8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b8 vec_b8 = vec_b8 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s16 vec_s16 = vec_s16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s16 vec_b16 = vec_s16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b16 vec_s16 = vec_s16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u16 vec_u16 = vec_u16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b16 vec_u16 = vec_u16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u16 vec_b16 = vec_u16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b16 vec_b16 = vec_b16 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s32 vec_s32 = vec_s32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_s32 vec_b32 = vec_s32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b32 vec_s32 = vec_s32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u32 vec_u32 = vec_u32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b32 vec_u32 = vec_u32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_u32 vec_b32 = vec_u32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b32 vec_b32 = vec_b32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_f32 vec_f32 = vec_f32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_f32 vec_b32 = vec_f32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_or vec_b32 vec_f32 = vec_f32 vec_vor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s8 vec_s8 = vec_s8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s8 vec_b8 = vec_s8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b8 vec_s8 = vec_s8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u8 vec_u8 = vec_u8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b8 vec_u8 = vec_u8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u8 vec_b8 = vec_u8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b8 vec_b8 = vec_b8 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s16 vec_s16 = vec_s16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s16 vec_b16 = vec_s16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b16 vec_s16 = vec_s16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u16 vec_u16 = vec_u16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b16 vec_u16 = vec_u16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u16 vec_b16 = vec_u16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b16 vec_b16 = vec_b16 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s32 vec_s32 = vec_s32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_s32 vec_b32 = vec_s32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b32 vec_s32 = vec_s32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u32 vec_u32 = vec_u32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b32 vec_u32 = vec_u32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_u32 vec_b32 = vec_u32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b32 vec_b32 = vec_b32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_f32 vec_f32 = vec_f32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_f32 vec_b32 = vec_f32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_xor vec_b32 vec_f32 = vec_f32 vec_vxor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s8 vec_s8 = vec_s8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s8 vec_b8 = vec_s8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b8 vec_s8 = vec_s8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u8 vec_u8 = vec_u8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b8 vec_u8 = vec_u8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u8 vec_b8 = vec_u8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b8 vec_b8 = vec_b8 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s16 vec_s16 = vec_s16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s16 vec_b16 = vec_s16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b16 vec_s16 = vec_s16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u16 vec_u16 = vec_u16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b16 vec_u16 = vec_u16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u16 vec_b16 = vec_u16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b16 vec_b16 = vec_b16 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s32 vec_s32 = vec_s32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_s32 vec_b32 = vec_s32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b32 vec_s32 = vec_s32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u32 vec_u32 = vec_u32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b32 vec_u32 = vec_u32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_u32 vec_b32 = vec_u32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b32 vec_b32 = vec_b32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_f32 vec_f32 = vec_f32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_f32 vec_b32 = vec_f32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_andc vec_b32 vec_f32 = vec_f32 vec_vandc BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_u8 vec_u8 = vec_u8 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_s8 vec_s8 = vec_s8 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_b8 vec_b8 = vec_b8 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_u16 vec_u16 = vec_u16 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_s16 vec_s16 = vec_s16 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_b16 vec_b16 = vec_b16 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_u32 vec_u32 = vec_u32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_s32 vec_s32 = vec_s32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_b32 vec_b32 = vec_b32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nor vec_f32 vec_f32 = vec_f32 vec_vnor BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_u8 vec_u8 = vec_u8 vec_vrlb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_u16 vec_u16 = vec_u16 vec_vrlh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_u32 vec_u32 = vec_u32 vec_vrlw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_s8 vec_u8 = vec_s8 vec_vrlb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_s16 vec_u16 = vec_s16 vec_vrlh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rl vec_s32 vec_u32 = vec_s32 vec_vrlw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_u8 vec_u8 = vec_u8 vec_vslb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_u16 vec_u16 = vec_u16 vec_vslh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_u32 vec_u32 = vec_u32 vec_vslw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_s8 vec_u8 = vec_s8 vec_vslb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_s16 vec_u16 = vec_s16 vec_vslh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sl vec_s32 vec_u32 = vec_s32 vec_vslw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u8 vec_u8 = vec_u8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u16 vec_u8 = vec_u16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u32 vec_u8 = vec_u32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s8 vec_u8 = vec_s8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s16 vec_u8 = vec_s16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s32 vec_u8 = vec_s32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b8 vec_u8 = vec_b8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b16 vec_u8 = vec_b16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b32 vec_u8 = vec_b32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_p16 vec_u8 = vec_p16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u8 vec_u16 = vec_u8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u16 vec_u16 = vec_u16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u32 vec_u16 = vec_u32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s8 vec_u16 = vec_s8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s16 vec_u16 = vec_s16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s32 vec_u16 = vec_s32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b8 vec_u16 = vec_b8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b16 vec_u16 = vec_b16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b32 vec_u16 = vec_b32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_p16 vec_u16 = vec_p16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u8 vec_u32 = vec_u8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u16 vec_u32 = vec_u16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_u32 vec_u32 = vec_u32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s8 vec_u32 = vec_s8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s16 vec_u32 = vec_s16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_s32 vec_u32 = vec_s32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b8 vec_u32 = vec_b8 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b16 vec_u32 = vec_b16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_b32 vec_u32 = vec_b32 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sll vec_p16 vec_u32 = vec_p16 vec_vsl BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_u8 vec_u8 = vec_u8 vec_vsrb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_u16 vec_u16 = vec_u16 vec_vsrh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_u32 vec_u32 = vec_u32 vec_vsrw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_s8 vec_u8 = vec_s8 vec_vsrb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_s16 vec_u16 = vec_s16 vec_vsrh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sr vec_s32 vec_u32 = vec_s32 vec_vsrw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_u8 vec_u8 = vec_u8 vec_vsrab BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_u16 vec_u16 = vec_u16 vec_vsrah BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_u32 vec_u32 = vec_u32 vec_vsraw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_s8 vec_u8 = vec_s8 vec_vsrab BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_s16 vec_u16 = vec_s16 vec_vsrah BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sra vec_s32 vec_u32 = vec_s32 vec_vsraw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u8 vec_u8 = vec_u8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u16 vec_u8 = vec_u16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u32 vec_u8 = vec_u32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s8 vec_u8 = vec_s8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s16 vec_u8 = vec_s16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s32 vec_u8 = vec_s32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b8 vec_u8 = vec_b8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b16 vec_u8 = vec_b16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b32 vec_u8 = vec_b32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_p16 vec_u8 = vec_p16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u8 vec_u16 = vec_u8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u16 vec_u16 = vec_u16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u32 vec_u16 = vec_u32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s8 vec_u16 = vec_s8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s16 vec_u16 = vec_s16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s32 vec_u16 = vec_s32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b8 vec_u16 = vec_b8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b16 vec_u16 = vec_b16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b32 vec_u16 = vec_b32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_p16 vec_u16 = vec_p16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u8 vec_u32 = vec_u8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u16 vec_u32 = vec_u16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_u32 vec_u32 = vec_u32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s8 vec_u32 = vec_s8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s16 vec_u32 = vec_s16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_s32 vec_u32 = vec_s32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b8 vec_u32 = vec_b8 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b16 vec_u32 = vec_b16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_b32 vec_u32 = vec_b32 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_srl vec_p16 vec_u32 = vec_p16 vec_vsr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_u8 vec_u8 = vec_b8 vec_vcmpgtub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_u16 vec_u16 = vec_b16 vec_vcmpgtuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_u32 vec_u32 = vec_b32 vec_vcmpgtuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_s8 vec_s8 = vec_b8 vec_vcmpgtsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_s16 vec_s16 = vec_b16 vec_vcmpgtsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_s32 vec_s32 = vec_b32 vec_vcmpgtsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_u8 vec_u8 = vec_b8 vec_vcmpequb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_u16 vec_u16 = vec_b16 vec_vcmpequh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_u32 vec_u32 = vec_b32 vec_vcmpequw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_s8 vec_s8 = vec_b8 vec_vcmpequb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_s16 vec_s16 = vec_b16 vec_vcmpequh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_s32 vec_s32 = vec_b32 vec_vcmpequw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b8 vec_b8 vec_b8 = vec_b8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b8 vec_b8 vec_u8 = vec_b8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u8 vec_u8 vec_u8 = vec_u8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u8 vec_u8 vec_b8 = vec_u8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s8 vec_s8 vec_u8 = vec_s8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s8 vec_s8 vec_b8 = vec_s8 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b16 vec_b16 vec_b16 = vec_b16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b16 vec_b16 vec_u16 = vec_b16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u16 vec_u16 vec_u16 = vec_u16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u16 vec_u16 vec_b16 = vec_u16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s16 vec_s16 vec_u16 = vec_s16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s16 vec_s16 vec_b16 = vec_s16 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b32 vec_b32 vec_b32 = vec_b32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_b32 vec_b32 vec_u32 = vec_b32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u32 vec_u32 vec_u32 = vec_u32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_u32 vec_u32 vec_b32 = vec_u32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s32 vec_s32 vec_u32 = vec_s32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_s32 vec_s32 vec_b32 = vec_s32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_f32 vec_f32 vec_b32 = vec_f32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sel vec_f32 vec_f32 vec_u32 = vec_f32 vec_vsel BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_u16 vec_u16 = vec_u8 vec_vpkuhum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_u32 vec_u32 = vec_u16 vec_vpkuwum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_s16 vec_s16 = vec_s8 vec_vpkuhum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_s32 vec_s32 = vec_s16 vec_vpkuwum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_b16 vec_b16 = vec_b8 vec_vpkuhum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_pack vec_b32 vec_b32 = vec_b16 vec_vpkuwum BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packs vec_u16 vec_u16 = vec_u8 vec_vpkuhus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packs vec_u32 vec_u32 = vec_u16 vec_vpkuwus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packsu vec_u16 vec_u16 = vec_u8 vec_vpkuhus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packsu vec_u32 vec_u32 = vec_u16 vec_vpkuwus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packs vec_s16 vec_s16 = vec_s8 vec_vpkshss BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packs vec_s32 vec_s32 = vec_s16 vec_vpkswss BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packsu vec_s16 vec_s16 = vec_u8 vec_vpkshus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packsu vec_s32 vec_s32 = vec_u16 vec_vpkswus BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_packpx vec_u32 vec_u32 = vec_p16 vec_vpkpx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_s8 = vec_s16 vec_vupkhsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_s16 = vec_s32 vec_vupkhsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_b8 = vec_b16 vec_vupkhsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_b16 = vec_b32 vec_vupkhsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackh vec_p16 = vec_u32 vec_vupkhpx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_s8 = vec_s16 vec_vupklsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_s16 = vec_s32 vec_vupklsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_b8 = vec_b16 vec_vupklsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_b16 = vec_b32 vec_vupklsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpackl vec_p16 = vec_u32 vec_vupklpx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_u8 vec_u8 = vec_u8 vec_vmrghb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_u16 vec_u16 = vec_u16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_u32 vec_u32 = vec_u32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_s8 vec_s8 = vec_s8 vec_vmrghb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_s16 vec_s16 = vec_s16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_s32 vec_s32 = vec_s32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_f32 vec_f32 = vec_f32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_p16 vec_p16 = vec_p16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_b8 vec_b8 = vec_b8 vec_vmrghb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_b16 vec_b16 = vec_b16 vec_vmrghh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergeh vec_b32 vec_b32 = vec_b32 vec_vmrghw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpack2uh vec_u8 vec_u8 = vec_u16 vec_unpack2uh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghb
+vec_unpack2uh vec_u16 vec_u16 = vec_u32 vec_unpack2uh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghh
+vec_unpack2sh vec_u8 vec_u8 = vec_s16 vec_unpack2sh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghb
+vec_unpack2sh vec_u16 vec_u16 = vec_s32 vec_unpack2sh BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrghh
+vec_mergel vec_u8 vec_u8 = vec_u8 vec_vmrglb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_u16 vec_u16 = vec_u16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_u32 vec_u32 = vec_u32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_s8 vec_s8 = vec_s8 vec_vmrglb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_s16 vec_s16 = vec_s16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_s32 vec_s32 = vec_s32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_f32 vec_f32 = vec_f32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_p16 vec_p16 = vec_p16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_b8 vec_b8 = vec_b8 vec_vmrglb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_b16 vec_b16 = vec_b16 vec_vmrglh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mergel vec_b32 vec_b32 = vec_b32 vec_vmrglw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_unpack2ul vec_u8 vec_u8 = vec_u16 vec_unpack2ul BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglb
+vec_unpack2ul vec_u16 vec_u16 = vec_u32 vec_unpack2ul BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglh
+vec_unpack2sl vec_u8 vec_u8 = vec_s16 vec_unpack2sl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglb
+vec_unpack2sl vec_u16 vec_u16 = vec_s32 vec_unpack2sl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vmrglh
+vec_splat vec_u8 immed_u5 = vec_u8 vec_vspltb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_u16 immed_u5 = vec_u16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_u32 immed_u5 = vec_u32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_s8 immed_u5 = vec_s8 vec_vspltb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_s16 immed_u5 = vec_s16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_s32 immed_u5 = vec_s32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_b8 immed_u5 = vec_b8 vec_vspltb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_b16 immed_u5 = vec_b16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_b32 immed_u5 = vec_b32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_p16 immed_u5 = vec_p16 vec_vsplth BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat vec_f32 immed_u5 = vec_f32 vec_vspltw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat_s8 immed_s5 = vec_s8 vec_vspltisb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat_s16 immed_s5 = vec_s16 vec_vspltish BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat_s32 immed_s5 = vec_s32 vec_vspltisw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_splat_u8 immed_s5 = vec_u8 vec_splat_u8 BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vspltisb
+vec_splat_u16 immed_s5 = vec_u16 vec_splat_u16 BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vspltish
+vec_splat_u32 immed_s5 = vec_u32 vec_splat_u32 BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vspltisw
+vec_perm vec_u8 vec_u8 vec_u8 = vec_u8 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_u16 vec_u16 vec_u8 = vec_u16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_u32 vec_u32 vec_u8 = vec_u32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_s8 vec_s8 vec_u8 = vec_s8 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_s16 vec_s16 vec_u8 = vec_s16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_s32 vec_s32 vec_u8 = vec_s32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_b8 vec_b8 vec_u8 = vec_b8 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_b16 vec_b16 vec_u8 = vec_b16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_b32 vec_b32 vec_u8 = vec_b32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_p16 vec_p16 vec_u8 = vec_p16 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_perm vec_f32 vec_f32 vec_u8 = vec_f32 vec_vperm BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_u8 vec_u8 immed_u4 = vec_u8 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_u16 vec_u16 immed_u4 = vec_u16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_u32 vec_u32 immed_u4 = vec_u32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_s8 vec_s8 immed_u4 = vec_s8 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_s16 vec_s16 immed_u4 = vec_s16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_s32 vec_s32 immed_u4 = vec_s32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_p16 vec_p16 immed_u4 = vec_p16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_f32 vec_f32 immed_u4 = vec_f32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_b8 vec_b8 immed_u4 = vec_b8 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_b16 vec_b16 immed_u4 = vec_b16 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sld vec_b32 vec_b32 immed_u4 = vec_b32 vec_vsldoi BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u8 vec_u8 = vec_u8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u16 vec_u8 = vec_u16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u32 vec_u8 = vec_u32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s8 vec_u8 = vec_s8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s16 vec_u8 = vec_s16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s32 vec_u8 = vec_s32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_p16 vec_u8 = vec_p16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u8 vec_s8 = vec_u8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u16 vec_s8 = vec_u16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_u32 vec_s8 = vec_u32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s8 vec_s8 = vec_s8 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s16 vec_s8 = vec_s16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_s32 vec_s8 = vec_s32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_p16 vec_s8 = vec_p16 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_f32 vec_u8 = vec_f32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_slo vec_f32 vec_s8 = vec_f32 vec_vslo BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u8 vec_u8 = vec_u8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u16 vec_u8 = vec_u16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u32 vec_u8 = vec_u32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s8 vec_u8 = vec_s8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s16 vec_u8 = vec_s16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s32 vec_u8 = vec_s32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_p16 vec_u8 = vec_p16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u8 vec_s8 = vec_u8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u16 vec_s8 = vec_u16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_u32 vec_s8 = vec_u32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s8 vec_s8 = vec_s8 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s16 vec_s8 = vec_s16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_s32 vec_s8 = vec_s32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_p16 vec_s8 = vec_p16 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_f32 vec_u8 = vec_f32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sro vec_f32 vec_s8 = vec_f32 vec_vsro BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u8 vec_u8 = vec_u8 vec_vmaxub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u8 vec_b8 = vec_u8 vec_vmaxub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b8 vec_u8 = vec_u8 vec_vmaxub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s8 vec_s8 = vec_s8 vec_vmaxsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s8 vec_b8 = vec_s8 vec_vmaxsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b8 vec_s8 = vec_s8 vec_vmaxsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u16 vec_u16 = vec_u16 vec_vmaxuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u16 vec_b16 = vec_u16 vec_vmaxuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b16 vec_u16 = vec_u16 vec_vmaxuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s16 vec_s16 = vec_s16 vec_vmaxsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s16 vec_b16 = vec_s16 vec_vmaxsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b16 vec_s16 = vec_s16 vec_vmaxsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u32 vec_u32 = vec_u32 vec_vmaxuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_u32 vec_b32 = vec_u32 vec_vmaxuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b32 vec_u32 = vec_u32 vec_vmaxuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s32 vec_s32 = vec_s32 vec_vmaxsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_s32 vec_b32 = vec_s32 vec_vmaxsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_b32 vec_s32 = vec_s32 vec_vmaxsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u8 vec_u8 = vec_u8 vec_vminub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u8 vec_b8 = vec_u8 vec_vminub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b8 vec_u8 = vec_u8 vec_vminub BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s8 vec_s8 = vec_s8 vec_vminsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s8 vec_b8 = vec_s8 vec_vminsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b8 vec_s8 = vec_s8 vec_vminsb BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u16 vec_u16 = vec_u16 vec_vminuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u16 vec_b16 = vec_u16 vec_vminuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b16 vec_u16 = vec_u16 vec_vminuh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s16 vec_s16 = vec_s16 vec_vminsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s16 vec_b16 = vec_s16 vec_vminsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b16 vec_s16 = vec_s16 vec_vminsh BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u32 vec_u32 = vec_u32 vec_vminuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_u32 vec_b32 = vec_u32 vec_vminuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b32 vec_u32 = vec_u32 vec_vminuw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s32 vec_s32 = vec_s32 vec_vminsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_s32 vec_b32 = vec_s32 vec_vminsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_b32 vec_s32 = vec_s32 vec_vminsw BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_lde int const_unsigned_char_ptr = vec_u8_load_op vec_lvebx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvebx TRUE FALSE
+vec_lde int const_unsigned_short_ptr = vec_u16_load_op vec_lvehx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvehx TRUE FALSE
+vec_lde int const_unsigned_int_ptr = vec_u32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_lde int const_unsigned_long_ptr = vec_u32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_lde int const_signed_char_ptr = vec_s8_load_op vec_lvebx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvebx TRUE FALSE
+vec_lde int const_short_ptr = vec_s16_load_op vec_lvehx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvehx TRUE FALSE
+vec_lde int const_int_ptr = vec_s32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_lde int const_long_ptr = vec_s32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_lde int const_float_ptr = vec_f32_load_op vec_lvewx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvewx TRUE FALSE
+vec_ld int const_unsigned_char_ptr = vec_u8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_unsigned_short_ptr = vec_u16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_unsigned_int_ptr = vec_u32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_unsigned_long_ptr = vec_u32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_signed_char_ptr = vec_s8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_short_ptr = vec_s16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_int_ptr = vec_s32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_long_ptr = vec_s32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_float_ptr = vec_f32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ldl int const_unsigned_char_ptr = vec_u8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_unsigned_short_ptr = vec_u16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_unsigned_int_ptr = vec_u32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_unsigned_long_ptr = vec_u32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_signed_char_ptr = vec_s8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_short_ptr = vec_s16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_int_ptr = vec_s32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_long_ptr = vec_s32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_float_ptr = vec_f32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ld int const_vec_u8_ptr = vec_u8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_u16_ptr = vec_u16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_u32_ptr = vec_u32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_s8_ptr = vec_s8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_s16_ptr = vec_s16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_s32_ptr = vec_s32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_p16_ptr = vec_p16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_b8_ptr = vec_b8_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_b16_ptr = vec_b16_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_b32_ptr = vec_b32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ld int const_vec_f32_ptr = vec_f32_load_op vec_lvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvx TRUE FALSE transform_lvx
+vec_ldl int const_vec_u8_ptr = vec_u8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_u16_ptr = vec_u16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_u32_ptr = vec_u32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_s8_ptr = vec_s8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_s16_ptr = vec_s16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_s32_ptr = vec_s32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_p16_ptr = vec_p16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_b8_ptr = vec_b8_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_b16_ptr = vec_b16_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_b32_ptr = vec_b32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ldl int const_vec_f32_ptr = vec_f32_load_op vec_lvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvxl TRUE FALSE transform_lvx
+vec_ste vec_u8 int unsigned_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_u16 int unsigned_short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_u32 int unsigned_int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_u32 int unsigned_long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_s8 int signed_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_s16 int short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_s32 int int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_s32 int long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_f32 int float_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_p16 int short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_p16 int unsigned_short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b8 int unsigned_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b8 int signed_char_ptr = void_store_op vec_stvebx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b16 int unsigned_short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b16 int short_ptr = void_store_op vec_stvehx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b32 int unsigned_int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b32 int unsigned_long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b32 int int_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ste vec_b32 int long_ptr = void_store_op vec_stvewx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_st vec_u8 int unsigned_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u16 int unsigned_short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u32 int unsigned_int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u32 int unsigned_long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s8 int signed_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s16 int short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s32 int int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s32 int long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_f32 int float_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_p16 int short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_p16 int unsigned_short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b8 int unsigned_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b8 int signed_char_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b16 int unsigned_short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b16 int short_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int unsigned_int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int unsigned_long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int int_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int long_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_stl vec_u8 int unsigned_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u16 int unsigned_short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u32 int unsigned_int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u32 int unsigned_long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s8 int signed_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s16 int short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s32 int int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s32 int long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_f32 int float_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_p16 int short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_p16 int unsigned_short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b8 int unsigned_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b8 int signed_char_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b16 int unsigned_short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b16 int short_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int unsigned_int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int unsigned_long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int int_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int long_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_st vec_u8 int vec_u8_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u16 int vec_u16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_u32 int vec_u32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s8 int vec_s8_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s16 int vec_s16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_s32 int vec_s32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b8 int vec_b8_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b16 int vec_b16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_b32 int vec_b32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_p16 int vec_p16_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_st vec_f32 int vec_f32_ptr = void_store_op vec_stvx BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvx FALSE FALSE transform_stvx
+vec_stl vec_u8 int vec_u8_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u16 int vec_u16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_u32 int vec_u32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s8 int vec_s8_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s16 int vec_s16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_s32 int vec_s32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b8 int vec_b8_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b16 int vec_b16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_b32 int vec_b32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_p16 int vec_p16_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_stl vec_f32 int vec_f32_ptr = void_store_op vec_stvxl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_stvxl FALSE FALSE transform_stvx
+vec_lvsl int const_volatile_unsigned_char_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_unsigned_short_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_unsigned_int_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_unsigned_long_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_signed_char_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_short_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_int_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_long_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsl int const_volatile_float_ptr = vec_u8 vec_lvsl BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsl TRUE TRUE
+vec_lvsr int const_volatile_unsigned_char_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_unsigned_short_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_unsigned_int_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_unsigned_long_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_signed_char_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_short_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_int_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_long_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_lvsr int const_volatile_float_ptr = vec_u8 vec_lvsr BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_lvsr TRUE TRUE
+vec_mfvscr = volatile_vec_u16 vec_mfvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_u8 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_u16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_u32 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_s8 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_s16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_s32 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_b8 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_b16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_b32 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_mtvscr vec_p16 = volatile_void vec_mtvscr BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_dst const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_short_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_int_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_long_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_float_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dstt const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_short_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_int_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_long_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_float_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstst const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_short_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_int_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_long_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_float_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dststt const_unsigned_char_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_unsigned_short_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_unsigned_int_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_unsigned_long_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_signed_char_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_short_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_int_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_long_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_float_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dst const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dst const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dst TRUE FALSE
+vec_dstt const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstt const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dstt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstt TRUE FALSE
+vec_dstst const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dstst const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dstst BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dstst TRUE FALSE
+vec_dststt const_vec_u8_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_u16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_u32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_s8_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_s16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_s32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_b8_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_b16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_b32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_p16_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dststt const_vec_f32_ptr int immed_u2 = volatile_void_load_op vec_dststt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_dststt TRUE FALSE
+vec_dss immed_u2 = volatile_void_load_op vec_dss BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_dssall = volatile_void_load_op vec_dssall BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_add vec_f32 vec_f32 = vec_f32 vec_vaddfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_sub vec_f32 vec_f32 = vec_f32 vec_vsubfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_madd vec_f32 vec_f32 vec_f32 = vec_f32 vec_vmaddfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_nmsub vec_f32 vec_f32 vec_f32 = vec_f32 vec_vnmsubfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpgt vec_f32 vec_f32 = vec_b32 vec_vcmpgtfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpge vec_f32 vec_f32 = vec_b32 vec_vcmpgefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpeq vec_f32 vec_f32 = vec_b32 vec_vcmpeqfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cmpb vec_f32 vec_f32 = vec_s32 vec_vcmpbfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_max vec_f32 vec_f32 = vec_f32 vec_vmaxfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_min vec_f32 vec_f32 = vec_f32 vec_vminfp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_re vec_f32 = vec_f32 vec_vrefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_rsqrte vec_f32 = vec_f32 vec_vrsqrtefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_loge vec_f32 = vec_f32 vec_vlogefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_expte vec_f32 = vec_f32 vec_vexptefp BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_trunc vec_f32 = vec_f32 vec_vrfiz BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_round vec_f32 = vec_f32 vec_vrfin BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ceil vec_f32 = vec_f32 vec_vrfip BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_floor vec_f32 = vec_f32 vec_vrfim BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ctf vec_u32 immed_u5 = vec_f32 vec_vcfux BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ctf vec_s32 immed_u5 = vec_f32 vec_vcfsx BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_ctu vec_f32 immed_u5 = vec_u32 vec_vctuxs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_cts vec_f32 immed_u5 = vec_s32 vec_vctsxs BUILTIN_AFTER_TRAVERSE CFG_VEC
+vec_all_gt vec_u8 vec_u8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_gt vec_u8 vec_b8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_gt vec_b8 vec_u8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_le vec_u8 vec_u8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_le vec_u8 vec_b8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_le vec_b8 vec_u8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_gt vec_u8 vec_u8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_gt vec_u8 vec_b8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_gt vec_b8 vec_u8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_le vec_u8 vec_u8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_le vec_u8 vec_b8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_le vec_b8 vec_u8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_gt vec_s8 vec_s8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_gt vec_s8 vec_b8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_gt vec_b8 vec_s8 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_le vec_s8 vec_s8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_le vec_s8 vec_b8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_le vec_b8 vec_s8 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_gt vec_s8 vec_s8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_gt vec_s8 vec_b8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_gt vec_b8 vec_s8 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_le vec_s8 vec_s8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_le vec_s8 vec_b8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_le vec_b8 vec_s8 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_lt vec_u8 vec_u8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_lt vec_u8 vec_b8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_lt vec_b8 vec_u8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_ge vec_u8 vec_u8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_ge vec_u8 vec_b8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_ge vec_b8 vec_u8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_lt vec_u8 vec_u8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_lt vec_u8 vec_b8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_lt vec_b8 vec_u8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_ge vec_u8 vec_u8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_ge vec_u8 vec_b8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_any_ge vec_b8 vec_u8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtubD
+vec_all_lt vec_s8 vec_s8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_lt vec_s8 vec_b8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_lt vec_b8 vec_s8 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_ge vec_s8 vec_s8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_ge vec_s8 vec_b8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_ge vec_b8 vec_s8 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_lt vec_s8 vec_s8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_lt vec_s8 vec_b8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_lt vec_b8 vec_s8 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_ge vec_s8 vec_s8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_ge vec_s8 vec_b8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_any_ge vec_b8 vec_s8 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtsbD
+vec_all_gt vec_u16 vec_u16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_gt vec_u16 vec_b16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_gt vec_b16 vec_u16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_le vec_u16 vec_u16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_le vec_u16 vec_b16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_le vec_b16 vec_u16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_gt vec_u16 vec_u16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_gt vec_u16 vec_b16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_gt vec_b16 vec_u16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_le vec_u16 vec_u16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_le vec_u16 vec_b16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_le vec_b16 vec_u16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_gt vec_s16 vec_s16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_gt vec_s16 vec_b16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_gt vec_b16 vec_s16 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_le vec_s16 vec_s16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_le vec_s16 vec_b16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_le vec_b16 vec_s16 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_gt vec_s16 vec_s16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_gt vec_s16 vec_b16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_gt vec_b16 vec_s16 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_le vec_s16 vec_s16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_le vec_s16 vec_b16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_le vec_b16 vec_s16 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_lt vec_u16 vec_u16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_lt vec_u16 vec_b16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_lt vec_b16 vec_u16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_ge vec_u16 vec_u16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_ge vec_u16 vec_b16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_ge vec_b16 vec_u16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_lt vec_u16 vec_u16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_lt vec_u16 vec_b16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_lt vec_b16 vec_u16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_ge vec_u16 vec_u16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_ge vec_u16 vec_b16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_any_ge vec_b16 vec_u16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuhD
+vec_all_lt vec_s16 vec_s16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_lt vec_s16 vec_b16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_lt vec_b16 vec_s16 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_ge vec_s16 vec_s16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_ge vec_s16 vec_b16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_ge vec_b16 vec_s16 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_lt vec_s16 vec_s16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_lt vec_s16 vec_b16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_lt vec_b16 vec_s16 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_ge vec_s16 vec_s16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_ge vec_s16 vec_b16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_any_ge vec_b16 vec_s16 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtshD
+vec_all_gt vec_u32 vec_u32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_gt vec_u32 vec_b32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_gt vec_b32 vec_u32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_le vec_u32 vec_u32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_le vec_u32 vec_b32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_le vec_b32 vec_u32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_gt vec_u32 vec_u32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_gt vec_u32 vec_b32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_gt vec_b32 vec_u32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_le vec_u32 vec_u32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_le vec_u32 vec_b32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_le vec_b32 vec_u32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_gt vec_s32 vec_s32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_gt vec_s32 vec_b32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_gt vec_b32 vec_s32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_le vec_s32 vec_s32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_le vec_s32 vec_b32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_le vec_b32 vec_s32 = cc26t vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_gt vec_s32 vec_s32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_gt vec_s32 vec_b32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_gt vec_b32 vec_s32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_le vec_s32 vec_s32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_le vec_s32 vec_b32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_le vec_b32 vec_s32 = cc24f vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_lt vec_u32 vec_u32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_lt vec_u32 vec_b32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_lt vec_b32 vec_u32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_ge vec_u32 vec_u32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_ge vec_u32 vec_b32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_ge vec_b32 vec_u32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_lt vec_u32 vec_u32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_lt vec_u32 vec_b32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_lt vec_b32 vec_u32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_ge vec_u32 vec_u32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_ge vec_u32 vec_b32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_any_ge vec_b32 vec_u32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtuwD
+vec_all_lt vec_s32 vec_s32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_lt vec_s32 vec_b32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_lt vec_b32 vec_s32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_ge vec_s32 vec_s32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_ge vec_s32 vec_b32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_ge vec_b32 vec_s32 = cc26tr vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_lt vec_s32 vec_s32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_lt vec_s32 vec_b32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_lt vec_b32 vec_s32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_ge vec_s32 vec_s32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_ge vec_s32 vec_b32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_any_ge vec_b32 vec_s32 = cc24fr vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtswD
+vec_all_eq vec_u8 vec_u8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_u8 vec_b8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_b8 vec_u8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_b8 vec_b8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_u8 vec_u8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_u8 vec_b8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_b8 vec_u8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_b8 vec_b8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_u8 vec_u8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_u8 vec_b8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_b8 vec_u8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_b8 vec_b8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_u8 vec_u8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_u8 vec_b8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_b8 vec_u8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_b8 vec_b8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_s8 vec_s8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_s8 vec_b8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_b8 vec_s8 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_s8 vec_s8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_s8 vec_b8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_ne vec_b8 vec_s8 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_s8 vec_s8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_s8 vec_b8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_eq vec_b8 vec_s8 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_s8 vec_s8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_s8 vec_b8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_any_ne vec_b8 vec_s8 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequbD
+vec_all_eq vec_u16 vec_u16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_u16 vec_b16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_b16 vec_u16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_b16 vec_b16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_p16 vec_p16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_u16 vec_u16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_u16 vec_b16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_b16 vec_u16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_b16 vec_b16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_p16 vec_p16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_u16 vec_u16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_u16 vec_b16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_b16 vec_u16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_b16 vec_b16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_p16 vec_p16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_u16 vec_u16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_u16 vec_b16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_b16 vec_u16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_b16 vec_b16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_p16 vec_p16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_s16 vec_s16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_s16 vec_b16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_b16 vec_s16 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_s16 vec_s16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_s16 vec_b16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_ne vec_b16 vec_s16 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_s16 vec_s16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_s16 vec_b16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_eq vec_b16 vec_s16 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_s16 vec_s16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_s16 vec_b16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_any_ne vec_b16 vec_s16 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequhD
+vec_all_eq vec_u32 vec_u32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_u32 vec_b32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_b32 vec_u32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_b32 vec_b32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_u32 vec_u32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_u32 vec_b32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_b32 vec_u32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_b32 vec_b32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_u32 vec_u32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_u32 vec_b32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_b32 vec_u32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_b32 vec_b32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_u32 vec_u32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_u32 vec_b32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_b32 vec_u32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_b32 vec_b32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_s32 vec_s32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_s32 vec_b32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_eq vec_b32 vec_s32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_s32 vec_s32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_s32 vec_b32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_ne vec_b32 vec_s32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_s32 vec_s32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_s32 vec_b32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_eq vec_b32 vec_s32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_s32 vec_s32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_s32 vec_b32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_any_ne vec_b32 vec_s32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpequwD
+vec_all_gt vec_f32 vec_f32 = cc24t vec_all_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_all_ngt vec_f32 vec_f32 = cc26t vec_all_ngt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_any_ngt vec_f32 vec_f32 = cc24f vec_any_ngt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_any_gt vec_f32 vec_f32 = cc26f vec_any_gt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_all_lt vec_f32 vec_f32 = cc24tr vec_all_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_all_nlt vec_f32 vec_f32 = cc26tr vec_all_nlt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_any_nlt vec_f32 vec_f32 = cc24fr vec_any_nlt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_any_lt vec_f32 vec_f32 = cc26fr vec_any_lt BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgtfpD
+vec_all_ge vec_f32 vec_f32 = cc24t vec_all_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_all_nge vec_f32 vec_f32 = cc26t vec_all_nge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_any_nge vec_f32 vec_f32 = cc24f vec_any_nge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_any_ge vec_f32 vec_f32 = cc26f vec_any_ge BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_all_le vec_f32 vec_f32 = cc24tr vec_all_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_all_nle vec_f32 vec_f32 = cc26tr vec_all_nle BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_any_nle vec_f32 vec_f32 = cc24fr vec_any_nle BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_any_le vec_f32 vec_f32 = cc26fr vec_any_le BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpgefpD
+vec_all_eq vec_f32 vec_f32 = cc24t vec_all_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_all_ne vec_f32 vec_f32 = cc26t vec_all_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_any_ne vec_f32 vec_f32 = cc24f vec_any_ne BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_any_eq vec_f32 vec_f32 = cc26f vec_any_eq BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_all_numeric vec_f32 = cc24td vec_all_numeric BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_all_nan vec_f32 = cc26td vec_all_nan BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_any_nan vec_f32 = cc24fd vec_any_nan BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_any_numeric vec_f32 = cc26fd vec_any_numeric BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpeqfpD
+vec_all_in vec_f32 vec_f32 = cc26t vec_all_in BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpbfpD
+vec_any_out vec_f32 vec_f32 = cc26f vec_any_out BUILTIN_AFTER_TRAVERSE CFG_VEC FALSE MOP_vcmpbfpD
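
(Illustration only, not part of the patch: each row of the vec.ops table added above maps a generic AltiVec intrinsic and its argument types to a specific instruction form and, for the predicate entries, a CR6 test selector such as cc24t or cc26f. A minimal C sketch of what those rows describe, assuming a PowerPC compiler with -maltivec and <altivec.h>:

    #include <altivec.h>

    /* vec_all_gt on vector signed char selects the vcmpgtsb. form with the
       "all true" CR6 test -- the cc24t / MOP_vcmpgtsbD rows above.  */
    int all_positive (vector signed char a)
    {
      return vec_all_gt (a, vec_splat_s8 (0));
    }

    /* vec_any_eq on vector unsigned int selects vcmpequw. with the
       "any true" CR6 test -- the cc26f / MOP_vcmpequwD rows above.  */
    int any_equal (vector unsigned int a, vector unsigned int b)
    {
      return vec_any_eq (a, b);
    }

The predicates return an int computed from CR6 rather than a vector result, which is why these rows carry a cc* result column instead of a vector type.)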
diff --git a/gcc/config/t-darwin b/gcc/config/t-darwin
index f5af52eb2e4..cbdb11d9325 100644
--- a/gcc/config/t-darwin
+++ b/gcc/config/t-darwin
@@ -14,7 +14,7 @@ gt-darwin.h : s-gtype ; @true
# Explain how to build crt2.o
$(T)crt2$(objext): $(srcdir)/config/darwin-crt2.c $(GCC_PASSES) \
$(TCONFIG_H) stmp-int-hdrs tsystem.h
- $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) \
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -mlongcall \
-c $(srcdir)/config/darwin-crt2.c -o $(T)crt2$(objext)
# Use unwind-dw2-fde-darwin
diff --git a/gcc/config/t-slibgcc-darwin b/gcc/config/t-slibgcc-darwin
index 34cb0d4160f..0120cf53cb9 100644
--- a/gcc/config/t-slibgcc-darwin
+++ b/gcc/config/t-slibgcc-darwin
@@ -28,3 +28,6 @@ SHLIB_INSTALL = \
$$(DESTDIR)$$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK)
SHLIB_MKMAP = $(srcdir)/mkmap-symver.awk
SHLIB_MAPFILES = $(srcdir)/libgcc-darwin.ver
+
+# APPLE LOCAL libcc_kext
+SHLIB_MULTILIB=.
diff --git a/gcc/config/x-linux b/gcc/config/x-linux
deleted file mode 100644
index d14586b0b36..00000000000
--- a/gcc/config/x-linux
+++ /dev/null
@@ -1,4 +0,0 @@
-host-linux.o : $(srcdir)/config/host-linux.c $(CONFIG_H) $(SYSTEM_H) \
- coretypes.h hosthooks.h hosthooks-def.h
- $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
- $(srcdir)/config/host-linux.c
diff --git a/gcc/config/x-solaris b/gcc/config/x-solaris
deleted file mode 100644
index 782f4a36802..00000000000
--- a/gcc/config/x-solaris
+++ /dev/null
@@ -1,4 +0,0 @@
-host-solaris.o : $(srcdir)/config/host-solaris.c $(CONFIG_H) $(SYSTEM_H) \
- coretypes.h hosthooks.h hosthooks-def.h
- $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
- $(srcdir)/config/host-solaris.c