aboutsummaryrefslogtreecommitdiff
path: root/gcc/config/epiphany
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/config/epiphany')
-rw-r--r--gcc/config/epiphany/constraints.md125
-rw-r--r--gcc/config/epiphany/epiphany-modes.def40
-rw-r--r--gcc/config/epiphany/epiphany-protos.h55
-rw-r--r--gcc/config/epiphany/epiphany-sched.md135
-rw-r--r--gcc/config/epiphany/epiphany.c2751
-rw-r--r--gcc/config/epiphany/epiphany.h881
-rw-r--r--gcc/config/epiphany/epiphany.md2447
-rw-r--r--gcc/config/epiphany/epiphany.opt140
-rw-r--r--gcc/config/epiphany/epiphany_intrinsics.h27
-rw-r--r--gcc/config/epiphany/mode-switch-use.c91
-rw-r--r--gcc/config/epiphany/predicates.md352
-rw-r--r--gcc/config/epiphany/resolve-sw-modes.c182
-rw-r--r--gcc/config/epiphany/t-epiphany32
13 files changed, 7258 insertions, 0 deletions
diff --git a/gcc/config/epiphany/constraints.md b/gcc/config/epiphany/constraints.md
new file mode 100644
index 00000000000..d7c6c17845d
--- /dev/null
+++ b/gcc/config/epiphany/constraints.md
@@ -0,0 +1,125 @@
+;; Constraint definitions for Adapteva epiphany
+;; Copyright (C) 2007, 2009, 2011 Free Software Foundation, Inc.
+;; Contributed by Embecosm on behalf of Adapteva, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Integer constraints
+
+(define_constraint "U16"
+ "An unsigned 16-bit constant."
+ (ior (and (match_code "const_int")
+ (match_test "IMM16 (ival)"))
+ (and (match_code "symbol_ref,label_ref,const")
+ (match_test "epiphany_small16 (op)"))))
+
+(define_constraint "K"
+ "An unsigned 5-bit constant."
+ (and (match_code "const_int")
+ (match_test "IMM5 (ival)")))
+
+;; This could also accept symbol_ref, label_ref or const if we introduce
+;; a small area and/or attribute that satisfies the 11-bit signed range.
+(define_constraint "L"
+ "A signed 11-bit constant."
+ (and (match_code "const_int")
+ (match_test "SIMM11 (ival)")))
+
+(define_constraint "Cm1"
+ "A signed 11-bit constant added to -1"
+ (and (match_code "const_int")
+ (match_test "SIMM11 (ival+1)")
+ (match_test "epiphany_m1reg >= 0")))
+
+(define_constraint "Cl1"
+ "Left-shift of -1"
+ (and (match_code "const_int")
+ (match_test "ival == (ival | ~(ival-1))")
+ (match_test "epiphany_m1reg >= 0")))
+
+(define_constraint "Cr1"
+ "Right-shift of -1"
+ (and (match_code "const_int")
+ (match_test "ival == (ival & ~(ival+1))")
+ (match_test "epiphany_m1reg >= 0")))
+
+(define_constraint "Cal"
+ "Constant for arithmetic/logical operations"
+ (match_test "(flag_pic
+ ? nonsymbolic_immediate_operand (op, VOIDmode)
+ : immediate_operand (op, VOIDmode))"))
+
+(define_constraint "Csy"
+ "Symbolic constant for call/jump instruction"
+ (match_test "symbolic_operand (op, VOIDmode)"))
+
+;; Register constraints
+;; proper register constraints define a register class and can thus
+;; drive register allocation and reload. OTOH sometimes we want to
+;; avoid just that.
+
+;; The register class usable in short insns.
+;; Subject to TARGET_PREFER_SHORT_INSN_REGS.
+(define_register_constraint "Rcs" "SHORT_INSN_REGS"
+ "short insn register class.")
+
+; The registers that can be used to hold a sibcall call address.
+; This must not conflict with any callee-saved registers.
+(define_register_constraint "Rsc" "SIBCALL_REGS"
+ "sibcall register class")
+
+; The registers that can be used to hold a status value
+(define_register_constraint "Rct" "CORE_CONTROL_REGS"
+ "Core control register class")
+
+;; The register group usable in short insns.
+(define_constraint "Rgs"
+ "short insn register group."
+ (and (match_code "reg")
+ (match_test "REGNO (op) >= FIRST_PSEUDO_REGISTER || REGNO (op) <= 7")))
+
+;; Constant suitable for the addsi3_r pattern.
+(define_constraint "Car"
+ "addsi3_r constant."
+ (and (match_code "const_int")
+ (ior (match_test "RTX_OK_FOR_OFFSET_P (SImode, op)")
+ (match_test "RTX_OK_FOR_OFFSET_P (HImode, op)")
+ (match_test "RTX_OK_FOR_OFFSET_P (QImode, op)"))))
+
+;; The return address if it can be replaced with GPR_LR.
+(define_constraint "Rra"
+ "return address constraint - register variant"
+ (and (match_code "unspec")
+ (match_test "XINT (op, 1) == UNSPEC_RETURN_ADDR")
+ (match_test "!MACHINE_FUNCTION (cfun)->lr_clobbered")))
+
+(define_constraint "Rcc"
+ "integer condition code"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == CC_REGNUM")))
+
+;; The return address, which might be a stack slot. */
+(define_constraint "Sra"
+ "return address constraint - memory variant"
+ (and (match_code "unspec")
+ (match_test "XINT (op, 1) == UNSPEC_RETURN_ADDR")))
+
+(define_constraint "Cfm"
+ "control register values to switch fp mode"
+ (and (match_code "const")
+ (match_test "GET_CODE (XEXP (op, 0)) == UNSPEC")
+ (match_test "XINT (XEXP (op, 0), 1) == UNSPEC_FP_MODE")))
diff --git a/gcc/config/epiphany/epiphany-modes.def b/gcc/config/epiphany/epiphany-modes.def
new file mode 100644
index 00000000000..c8375a1d75b
--- /dev/null
+++ b/gcc/config/epiphany/epiphany-modes.def
@@ -0,0 +1,40 @@
+/* Definitions of target machine for GNU compiler, Adapteva Epiphany cpu.
+ Copyright (C) 2002, 2007, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+CC_MODE (CC_Z); /* only Z valid - for add, testing result. */
+CC_MODE (CC_N_NE); /* N for not-equal (for lsl). */
+CC_MODE (CC_C_LTU); /* C for unsigned-less-than (for add with carry). */
+CC_MODE (CC_C_GTU); /* C for unsigned-greater-than (for sub with carry). */
+CC_MODE (CC_FP);
+CC_MODE (CC_FP_EQ); /* AZ for equal. */
+CC_MODE (CC_FP_ORD); /* AZ || ~AC for ordered. */
+CC_MODE (CC_FP_UNEQ); /* AZ || ~AC for unordered / equal. */
+CC_MODE (CC_FP_GTE); /* ~AC / AZ for greater than / equal. */
+#if 0 /* This would be needed for simplified NaN testing. */
+RESET_FLOAT_FORMAT (SF, motorola_single_format);
+RESET_FLOAT_FORMAT (DF, motorola_double_format);
+#endif
+VECTOR_MODES (INT, 4); /* V4QI V2HI */
+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */
+VECTOR_MODE (FLOAT, SF, 2); /* V2SF */
+ADJUST_ALIGNMENT (V8QI, epiphany_vect_align);
+ADJUST_ALIGNMENT (V4HI, epiphany_vect_align);
+ADJUST_ALIGNMENT (V2SI, epiphany_vect_align);
+ADJUST_ALIGNMENT (V2SF, epiphany_vect_align);
diff --git a/gcc/config/epiphany/epiphany-protos.h b/gcc/config/epiphany/epiphany-protos.h
new file mode 100644
index 00000000000..334c5337f7c
--- /dev/null
+++ b/gcc/config/epiphany/epiphany-protos.h
@@ -0,0 +1,55 @@
+/* Definitions of target machine for GNU compiler, EPIPHANY cpu.
+ Copyright (C) 2000, 2004, 2007, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifdef RTX_CODE
+extern enum machine_mode epiphany_select_cc_mode (enum rtx_code, rtx, rtx);
+
+/* Define the function that build the compare insn for scc and bcc. */
+extern struct rtx_def *gen_compare_reg (enum machine_mode, enum rtx_code,
+ enum machine_mode, rtx, rtx);
+#endif
+
+/* Declarations for various fns used in the .md file. */
+extern void epiphany_final_prescan_insn (rtx, rtx *, int);
+extern bool epiphany_is_long_call_p (rtx);
+extern bool epiphany_small16 (rtx);
+bool epiphany_uninterruptible_p (tree decl);
+bool epiphany_call_uninterruptible_p (rtx mem);
+extern rtx sfunc_symbol (const char *name);
+
+extern void epiphany_expand_prologue (void);
+extern void epiphany_expand_epilogue (int);
+extern int epiphany_initial_elimination_offset (int, int);
+extern void epiphany_init_expanders (void);
+extern int hard_regno_mode_ok (int regno, enum machine_mode mode);
+#ifdef HARD_CONST
+extern void emit_set_fp_mode (int entity, int mode, HARD_REG_SET regs_live);
+#endif
+extern void epiphany_insert_mode_switch_use (rtx insn, int, int);
+extern void epiphany_expand_set_fp_mode (rtx *operands);
+extern int epiphany_mode_needed (int entity, rtx insn);
+extern int epiphany_mode_entry_exit (int entity, bool);
+extern int epiphany_mode_after (int entity, int last_mode, rtx insn);
+extern int epiphany_mode_priority_to_mode (int entity, unsigned priority);
+extern bool epiphany_epilogue_uses (int regno);
+extern bool epiphany_optimize_mode_switching (int entity);
+extern bool epiphany_is_interrupt_p (tree);
+extern unsigned epiphany_special_round_type_align (tree, unsigned, unsigned);
+extern unsigned epiphany_adjust_field_align (tree, unsigned);
diff --git a/gcc/config/epiphany/epiphany-sched.md b/gcc/config/epiphany/epiphany-sched.md
new file mode 100644
index 00000000000..a2420a562ae
--- /dev/null
+++ b/gcc/config/epiphany/epiphany-sched.md
@@ -0,0 +1,135 @@
+;; DFA scheduling description for EPIPHANY
+;; Copyright (C) 2004, 2006, 2007, 2009 Free Software Foundation, Inc.
+;; Contributed by Embecosm on behalf of Adapteva, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Two automata are defined to reduce number of states
+;; which a single large automaton will have. (Factoring)
+
+(define_automaton "inst_pipeline,fpu_pipe")
+
+;; This unit is basically the decode unit of the processor.
+;; Since epiphany is a dual issue machine, it is as if there are two
+;; units so that any insn can be processed by either one
+;; of the decoding unit.
+
+(define_cpu_unit "pipe_01,pipe_02" "inst_pipeline")
+
+;; The fixed point arithmetic unit.
+
+(define_cpu_unit "int" "inst_pipeline")
+
+;; The floating point unit.
+
+(define_cpu_unit "F0" "fpu_pipe")
+
+;; ----------------------------------------------------
+;; This reservation is to simplify the dual issue description.
+
+(define_reservation "issue" "pipe_01|pipe_02")
+
+;; This is to express instructions that cannot be paired.
+
+(define_reservation "d_lock" "pipe_01+pipe_02")
+
+;; We don't model all pipeline stages; we model the issue stage
+;; inasmuch as we allow only two instructions to issue simultaneously,
+;; and flow instructions prevent any simultaneous issue of another instruction.
+;; (This uses pipe_01 and pipe_02).
+;; Double issue of 'other' insns is prevented by using the int unit in the
+;; E1 stage.
+;; Double issue of float instructions is prevented by using F0 in the E1 stage.
+
+(define_insn_reservation "simple_arith" 2
+ (and (eq_attr "pipe_model" "epiphany")
+ (eq_attr "type" "move,cmove,compare,shift,misc,mul")
+ (eq_attr "length" "4"))
+ "issue,int")
+
+; anything but fp / fp_int has a bypass
+(define_bypass 1 "simple_arith" "simple_arith,simple_arith_2,simple_arith_4,load,store,branch,call,flow")
+
+(define_insn_reservation "simple_arith_2" 2
+ (and (eq_attr "pipe_model" "epiphany")
+ (eq_attr "type" "move,cmove,compare,shift,misc,mul")
+ (eq_attr "length" "8"))
+ "issue,issue+int,int")
+
+(define_insn_reservation "simple_arith_4" 4
+ (and (eq_attr "pipe_model" "epiphany")
+ (eq_attr "type" "move,compare,shift,misc,mul")
+ (eq_attr "length" "12,16,20,24"))
+ "issue,issue+int,issue+int,issue+int,int")
+
+;; Loads have a latency of two.
+;; Note that we fix up the latency of post_modify in epiphany.c:epiphany_adjust_cost
+
+(define_insn_reservation "load" 3
+ (and (eq_attr "pipe_model" "epiphany")
+ (eq_attr "type" "load"))
+ "issue,int")
+
+; anything but fp / fp_int has a bypass
+(define_bypass 2 "load" "simple_arith,simple_arith_2,simple_arith_4,load,store,branch,call,flow")
+
+(define_insn_reservation "store" 1
+ (and (eq_attr "pipe_model" "epiphany")
+ (eq_attr "type" "store"))
+ "issue,int")
+
+;; Branch
+;; Latency when taken: 3
+;; Issue Rate: 1
+;; The latency is 1 when the branch is not taken.
+;; We can't really do much with the latency, even if we could express it,
+;; but the pairing restrictions are useful to take into account.
+
+(define_insn_reservation "branch" 1
+ (and (eq_attr "pipe_model" "epiphany")
+ (eq_attr "type" "branch,uncond_branch"))
+ "d_lock")
+
+;; calls introduce a longish delay that is likely to flush the pipelines
+;; of the caller's instructions. Both the call instruction itself and
+;; the rts at the end of the call / sfunc incurs a three cycle penalty,
+;; thus also isolating the scheduling of caller and callee.
+
+(define_insn_reservation "call" 8
+ (and (eq_attr "pipe_model" "epiphany")
+ (eq_attr "type" "call,sfunc,fp_sfunc"))
+ "d_lock*8")
+
+(define_insn_reservation "flow" 1
+ (and (eq_attr "pipe_model" "epiphany")
+ (eq_attr "type" "flow"))
+ "d_lock")
+
+(define_insn_reservation "fp_arith_trunc" 3
+ (and (eq_attr "pipe_model" "epiphany")
+ (and (eq_attr "type" "fp,fp_int")
+ (eq_attr "rounding" "trunc")))
+ "issue,F0")
+
+(define_insn_reservation "fp_arith_nearest" 5
+ (and (eq_attr "pipe_model" "epiphany")
+ (and (eq_attr "type" "fp,fp_int")
+ (eq_attr "rounding" "nearest")))
+ "issue,F0")
+
+(define_bypass 2 "fp_arith_trunc" "store")
+(define_bypass 4 "fp_arith_nearest" "store")
diff --git a/gcc/config/epiphany/epiphany.c b/gcc/config/epiphany/epiphany.c
new file mode 100644
index 00000000000..a4652da87b8
--- /dev/null
+++ b/gcc/config/epiphany/epiphany.c
@@ -0,0 +1,2751 @@
+/* Subroutines used for code generation on the EPIPHANY cpu.
+ Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
+ 2004, 2005, 2006, 2007, 2009, 2010, 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "function.h"
+#include "expr.h"
+#include "diagnostic-core.h"
+#include "recog.h"
+#include "toplev.h"
+#include "tm_p.h"
+#include "target.h"
+#include "df.h"
+#include "langhooks.h"
+#include "insn-codes.h"
+#include "ggc.h"
+#include "tm-constrs.h"
+#include "tree-pass.h"
+#include "integrate.h"
+
+/* Which cpu we're compiling for. */
+int epiphany_cpu_type;
+
+/* Name of mangle string to add to symbols to separate code compiled for each
+ cpu (or NULL). */
+const char *epiphany_mangle_cpu;
+
+/* Array of valid operand punctuation characters. */
+char epiphany_punct_chars[256];
+
+/* The rounding mode that we generally use for floating point. */
+int epiphany_normal_fp_rounding;
+
+static void epiphany_init_reg_tables (void);
+static int get_epiphany_condition_code (rtx);
+static tree epiphany_handle_interrupt_attribute (tree *, tree, tree, int, bool *);
+static bool epiphany_pass_by_reference (cumulative_args_t, enum machine_mode,
+ const_tree, bool);
+static rtx frame_insn (rtx);
+
+/* defines for the initialization of the GCC target structure. */
+#define TARGET_ATTRIBUTE_TABLE epiphany_attribute_table
+
+#define TARGET_PRINT_OPERAND epiphany_print_operand
+#define TARGET_PRINT_OPERAND_ADDRESS epiphany_print_operand_address
+
+#define TARGET_RTX_COSTS epiphany_rtx_costs
+#define TARGET_ADDRESS_COST epiphany_address_cost
+#define TARGET_MEMORY_MOVE_COST epiphany_memory_move_cost
+
+#define TARGET_PROMOTE_FUNCTION_MODE epiphany_promote_function_mode
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
+
+#define TARGET_RETURN_IN_MEMORY epiphany_return_in_memory
+#define TARGET_PASS_BY_REFERENCE epiphany_pass_by_reference
+#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
+#define TARGET_FUNCTION_VALUE epiphany_function_value
+#define TARGET_LIBCALL_VALUE epiphany_libcall_value
+#define TARGET_FUNCTION_VALUE_REGNO_P epiphany_function_value_regno_p
+
+#define TARGET_SETUP_INCOMING_VARARGS epiphany_setup_incoming_varargs
+
+/* Using the simplistic varargs handling forces us to do partial reg/stack
+   argument passing for types with larger size (> 4 bytes) than alignment. */
+#define TARGET_ARG_PARTIAL_BYTES epiphany_arg_partial_bytes
+
+#define TARGET_FUNCTION_OK_FOR_SIBCALL epiphany_function_ok_for_sibcall
+
+#define TARGET_SCHED_ISSUE_RATE epiphany_issue_rate
+#define TARGET_SCHED_ADJUST_COST epiphany_adjust_cost
+
+#define TARGET_LEGITIMATE_ADDRESS_P epiphany_legitimate_address_p
+
+#define TARGET_SECONDARY_RELOAD epiphany_secondary_reload
+
+#define TARGET_OPTION_OVERRIDE epiphany_override_options
+
+#define TARGET_CONDITIONAL_REGISTER_USAGE epiphany_conditional_register_usage
+
+#define TARGET_FUNCTION_ARG epiphany_function_arg
+
+#define TARGET_FUNCTION_ARG_ADVANCE epiphany_function_arg_advance
+
+#define TARGET_FUNCTION_ARG_BOUNDARY epiphany_function_arg_boundary
+
+#define TARGET_TRAMPOLINE_INIT epiphany_trampoline_init
+
+/* Nonzero if the constant rtx value is a legitimate general operand.
+ We can handle any 32- or 64-bit constant. */
+#define TARGET_LEGITIMATE_CONSTANT_P hook_bool_mode_rtx_true
+
+#define TARGET_MIN_DIVISIONS_FOR_RECIP_MUL \
+ epiphany_min_divisions_for_recip_mul
+
+#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE epiphany_preferred_simd_mode
+
+#define TARGET_VECTOR_MODE_SUPPORTED_P epiphany_vector_mode_supported_p
+
+#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE \
+ epiphany_vector_alignment_reachable
+
+#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
+ epiphany_support_vector_misalignment
+
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
+ hook_bool_const_tree_hwi_hwi_const_tree_true
+#define TARGET_ASM_OUTPUT_MI_THUNK epiphany_output_mi_thunk
+
+#include "target-def.h"
+
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
+
+bool
+epiphany_is_interrupt_p (tree decl)
+{
+ tree attrs;
+
+ attrs = DECL_ATTRIBUTES (decl);
+ if (lookup_attribute ("interrupt", attrs))
+ return true;
+ else
+ return false;
+}
+
+/* Called from epiphany_override_options.
+ We use this to initialize various things. */
+
+static void
+epiphany_init (void)
+{
+ /* N.B. this pass must not run before the first optimize_mode_switching
+   pass because of the side effect of epiphany_mode_needed on
+ MACHINE_FUNCTION(cfun)->unknown_mode_uses. But it must run before
+ pass_resolve_sw_modes. */
+ static struct register_pass_info insert_use_info
+ = { &pass_mode_switch_use.pass, "mode_sw",
+ 1, PASS_POS_INSERT_AFTER
+ };
+ static struct register_pass_info mode_sw2_info
+ = { &pass_mode_switching.pass, "mode_sw",
+ 1, PASS_POS_INSERT_AFTER
+ };
+ static struct register_pass_info mode_sw3_info
+ = { &pass_resolve_sw_modes.pass, "mode_sw",
+ 1, PASS_POS_INSERT_AFTER
+ };
+ static struct register_pass_info mode_sw4_info
+ = { &pass_split_all_insns.pass, "mode_sw",
+ 1, PASS_POS_INSERT_AFTER
+ };
+
+ epiphany_init_reg_tables ();
+
+ /* Initialize array for PRINT_OPERAND_PUNCT_VALID_P. */
+ memset (epiphany_punct_chars, 0, sizeof (epiphany_punct_chars));
+ epiphany_punct_chars['-'] = 1;
+
+ epiphany_normal_fp_rounding
+ = (epiphany_normal_fp_mode == FP_MODE_ROUND_TRUNC
+ ? FP_MODE_ROUND_TRUNC : FP_MODE_ROUND_NEAREST);
+ register_pass (&mode_sw4_info);
+ register_pass (&mode_sw2_info);
+ register_pass (&mode_sw3_info);
+ register_pass (&insert_use_info);
+ register_pass (&mode_sw2_info);
+
+#if 1 /* As long as peep2_rescan is not implemented,
+ (see http://gcc.gnu.org/ml/gcc-patches/2011-10/msg02819.html,)
+ we need a second peephole2 pass to get reasonable code. */
+ {
+ static struct register_pass_info peep2_2_info
+ = { &pass_peephole2.pass, "peephole2",
+ 1, PASS_POS_INSERT_AFTER
+ };
+
+ register_pass (&peep2_2_info);
+ }
+#endif
+}
+
+/* The condition codes of the EPIPHANY, and the inverse function. */
+static const char *const epiphany_condition_codes[] =
+{ /* 0 1 2 3 4 5 6 7 8 9 */
+ "eq", "ne", "ltu", "gteu", "gt", "lte", "gte", "lt", "gtu", "lteu",
+ /* 10 11 12 13 */
+ "beq","bne","blt", "blte",
+};
+
+#define EPIPHANY_INVERSE_CONDITION_CODE(X) ((X) ^ 1)
+
+/* Returns the index of the EPIPHANY condition code string in
+ `epiphany_condition_codes'. COMPARISON should be an rtx like
+ `(eq (...) (...))'. */
+
+static int
+get_epiphany_condition_code (rtx comparison)
+{
+ switch (GET_MODE (XEXP (comparison, 0)))
+ {
+ case CCmode:
+ switch (GET_CODE (comparison))
+ {
+ case EQ : return 0;
+ case NE : return 1;
+ case LTU : return 2;
+ case GEU : return 3;
+ case GT : return 4;
+ case LE : return 5;
+ case GE : return 6;
+ case LT : return 7;
+ case GTU : return 8;
+ case LEU : return 9;
+
+ default : gcc_unreachable ();
+ }
+ case CC_N_NEmode:
+ switch (GET_CODE (comparison))
+ {
+ case EQ: return 6;
+ case NE: return 7;
+ default: gcc_unreachable ();
+ }
+ case CC_C_LTUmode:
+ switch (GET_CODE (comparison))
+ {
+ case GEU: return 2;
+ case LTU: return 3;
+ default: gcc_unreachable ();
+ }
+ case CC_C_GTUmode:
+ switch (GET_CODE (comparison))
+ {
+ case LEU: return 3;
+ case GTU: return 2;
+ default: gcc_unreachable ();
+ }
+ case CC_FPmode:
+ switch (GET_CODE (comparison))
+ {
+ case EQ: return 10;
+ case NE: return 11;
+ case LT: return 12;
+ case LE: return 13;
+ default: gcc_unreachable ();
+ }
+ case CC_FP_EQmode:
+ switch (GET_CODE (comparison))
+ {
+ case EQ: return 0;
+ case NE: return 1;
+ default: gcc_unreachable ();
+ }
+ case CC_FP_GTEmode:
+ switch (GET_CODE (comparison))
+ {
+ case EQ: return 0;
+ case NE: return 1;
+ case GT : return 4;
+ case GE : return 6;
+ case UNLE : return 5;
+ case UNLT : return 7;
+ default: gcc_unreachable ();
+ }
+ case CC_FP_ORDmode:
+ switch (GET_CODE (comparison))
+ {
+ case ORDERED: return 9;
+ case UNORDERED: return 8;
+ default: gcc_unreachable ();
+ }
+ case CC_FP_UNEQmode:
+ switch (GET_CODE (comparison))
+ {
+ case UNEQ: return 9;
+ case LTGT: return 8;
+ default: gcc_unreachable ();
+ }
+ default: gcc_unreachable ();
+ }
+ /*NOTREACHED*/
+ return (42);
+}
+
+
+/* Return 1 if hard register REGNO can hold a value of machine_mode MODE. */
+int
+hard_regno_mode_ok (int regno, enum machine_mode mode)
+{
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return (regno & 1) == 0 && GPR_P (regno);
+ else
+ return 1;
+}
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison. */
+
+enum machine_mode
+epiphany_select_cc_mode (enum rtx_code op,
+ rtx x ATTRIBUTE_UNUSED,
+ rtx y ATTRIBUTE_UNUSED)
+{
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ {
+ if (TARGET_SOFT_CMPSF)
+ {
+ if (op == EQ || op == NE)
+ return CC_FP_EQmode;
+ if (op == ORDERED || op == UNORDERED)
+ return CC_FP_ORDmode;
+ if (op == UNEQ || op == LTGT)
+ return CC_FP_UNEQmode;
+ return CC_FP_GTEmode;
+ }
+ return CC_FPmode;
+ }
+ /* recognize combiner pattern ashlsi_btst:
+ (parallel [
+ (set (reg:N_NE 65 cc1)
+ (compare:N_NE (zero_extract:SI (reg/v:SI 75 [ a ])
+ (const_int 1 [0x1])
+ (const_int 0 [0x0]))
+ (const_int 0 [0x0])))
+ (clobber (scratch:SI)) */
+ else if ((op == EQ || op == NE)
+ && GET_CODE (x) == ZERO_EXTRACT
+ && XEXP (x, 1) == const1_rtx
+ && CONST_INT_P (XEXP (x, 2)))
+ return CC_N_NEmode;
+ else if ((op == GEU || op == LTU) && GET_CODE (x) == PLUS)
+ return CC_C_LTUmode;
+ else if ((op == LEU || op == GTU) && GET_CODE (x) == MINUS)
+ return CC_C_GTUmode;
+ else
+ return CCmode;
+}
+
+enum reg_class epiphany_regno_reg_class[FIRST_PSEUDO_REGISTER];
+
+static void
+epiphany_init_reg_tables (void)
+{
+ int i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (i == GPR_LR)
+ epiphany_regno_reg_class[i] = LR_REGS;
+ else if (i <= 7 && TARGET_PREFER_SHORT_INSN_REGS)
+ epiphany_regno_reg_class[i] = SHORT_INSN_REGS;
+ else if (call_used_regs[i]
+ && TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], i))
+ epiphany_regno_reg_class[i] = SIBCALL_REGS;
+ else if (i >= CORE_CONTROL_FIRST && i <= CORE_CONTROL_LAST)
+ epiphany_regno_reg_class[i] = CORE_CONTROL_REGS;
+ else if (i < (GPR_LAST+1)
+ || i == ARG_POINTER_REGNUM || i == FRAME_POINTER_REGNUM)
+ epiphany_regno_reg_class[i] = GENERAL_REGS;
+ else if (i == CC_REGNUM)
+ epiphany_regno_reg_class[i] = NO_REGS /* CC_REG: must be NO_REGS */;
+ else
+ epiphany_regno_reg_class[i] = NO_REGS;
+ }
+}
+
+/* EPIPHANY specific attribute support.
+
+ The EPIPHANY has these attributes:
+ interrupt - for interrupt functions.
+ short_call - the function is assumed to be reachable with the b / bl
+ instructions.
+ long_call - the function address is loaded into a register before use.
+ disinterrupt - functions which mask interrupts throughout.
+ They unmask them while calling an interruptible
+ function, though. */
+
+static const struct attribute_spec epiphany_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ { "interrupt", 1, 1, true, false, false, epiphany_handle_interrupt_attribute, true },
+ { "long_call", 0, 0, false, true, true, NULL, false },
+ { "short_call", 0, 0, false, true, true, NULL, false },
+ { "disinterrupt", 0, 0, false, true, true, NULL, false },
+ { NULL, 0, 0, false, false, false, NULL, false }
+};
+
+/* Handle an "interrupt" attribute; arguments as in
+ struct attribute_spec.handler. */
+static tree
+epiphany_handle_interrupt_attribute (tree *node ATTRIBUTE_UNUSED,
+ tree name, tree args,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ tree value = TREE_VALUE (args);
+
+ if (TREE_CODE (value) != STRING_CST)
+ {
+ warning (OPT_Wattributes,
+ "argument of %qE attribute is not a string constant", name);
+ *no_add_attrs = true;
+ }
+ else if (strcmp (TREE_STRING_POINTER (value), "reset")
+ && strcmp (TREE_STRING_POINTER (value), "software_exception")
+ && strcmp (TREE_STRING_POINTER (value), "timer")
+ && strcmp (TREE_STRING_POINTER (value), "dma0")
+ && strcmp (TREE_STRING_POINTER (value), "dma1")
+ && strcmp (TREE_STRING_POINTER (value), "static_flag")
+ && strcmp (TREE_STRING_POINTER (value), "swi"))
+ {
+ warning (OPT_Wattributes,
+ "argument of %qE attribute is not \"reset\", \"software_exception\", \"timer\", \"dma0\", \"dma1\", \"static_flag\" or \"swi\"",
+ name);
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+
+/* Misc. utilities. */
+
+/* Generate a SYMBOL_REF for the special function NAME. When the address
+ can't be placed directly into a call instruction, and if possible, copy
+ it to a register so that cse / code hoisting is possible. */
+rtx
+sfunc_symbol (const char *name)
+{
+ rtx sym = gen_rtx_SYMBOL_REF (Pmode, name);
+
+ /* These sfuncs should be hidden, and every dso should get a copy. */
+ SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION | SYMBOL_FLAG_LOCAL;
+ if (TARGET_SHORT_CALLS)
+ ; /* Nothing to be done. */
+ else if (can_create_pseudo_p ())
+ sym = copy_to_mode_reg (Pmode, sym);
+ else /* We rely on reload to fix this up. */
+ gcc_assert (!reload_in_progress || reload_completed);
+ return sym;
+}
+
+/* X and Y are two things to compare using CODE in IN_MODE.
+   Emit the compare insn, construct the proper cc reg in the proper
+ mode, and return the rtx for the cc reg comparison in CMODE. */
+
+rtx
+gen_compare_reg (enum machine_mode cmode, enum rtx_code code,
+ enum machine_mode in_mode, rtx x, rtx y)
+{
+ enum machine_mode mode = SELECT_CC_MODE (code, x, y);
+ rtx cc_reg, pat, clob0, clob1, clob2;
+
+ if (in_mode == VOIDmode)
+ in_mode = GET_MODE (x);
+ if (in_mode == VOIDmode)
+ in_mode = GET_MODE (y);
+
+ if (mode == CC_FPmode)
+ {
+ /* The epiphany has only EQ / NE / LT / LE conditions for
+ hardware floating point. */
+ if (code == GT || code == GE || code == UNLE || code == UNLT)
+ {
+ rtx tmp = x; x = y; y = tmp;
+ code = swap_condition (code);
+ }
+ cc_reg = gen_rtx_REG (mode, CCFP_REGNUM);
+ y = force_reg (in_mode, y);
+ }
+ else
+ {
+ if (mode == CC_FP_GTEmode
+ && (code == LE || code == LT || code == UNGT || code == UNGE))
+ {
+ rtx tmp = x; x = y; y = tmp;
+ code = swap_condition (code);
+ }
+ cc_reg = gen_rtx_REG (mode, CC_REGNUM);
+ }
+ if ((mode == CC_FP_EQmode || mode == CC_FP_GTEmode
+ || mode == CC_FP_ORDmode || mode == CC_FP_UNEQmode)
+ /* mov<mode>cc might want to re-emit a comparison during ifcvt. */
+ && (!REG_P (x) || REGNO (x) != 0 || !REG_P (y) || REGNO (y) != 1))
+ {
+ rtx reg;
+
+ gcc_assert (currently_expanding_to_rtl);
+ reg = gen_rtx_REG (in_mode, 0);
+ gcc_assert (!reg_overlap_mentioned_p (reg, y));
+ emit_move_insn (reg, x);
+ x = reg;
+ reg = gen_rtx_REG (in_mode, 1);
+ emit_move_insn (reg, y);
+ y = reg;
+ }
+ else
+ x = force_reg (in_mode, x);
+
+ pat = gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y));
+ if (mode == CC_FP_EQmode || mode == CC_FP_GTEmode)
+ {
+ const char *name = mode == CC_FP_EQmode ? "__eqsf2" : "__gtesf2";
+ rtx use = gen_rtx_USE (VOIDmode, sfunc_symbol (name));
+
+ clob0 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, GPR_IP));
+ clob1 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, GPR_LR));
+ pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, pat, use, clob0, clob1));
+ }
+ else if (mode == CC_FP_ORDmode || mode == CC_FP_UNEQmode)
+ {
+ const char *name = mode == CC_FP_ORDmode ? "__ordsf2" : "__uneqsf2";
+ rtx use = gen_rtx_USE (VOIDmode, sfunc_symbol (name));
+
+ clob0 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, GPR_IP));
+ clob1 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, GPR_16));
+ clob2 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, GPR_LR));
+ pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (5, pat, use,
+ clob0, clob1, clob2));
+ }
+ else
+ {
+ clob0 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (in_mode));
+ pat = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, pat, clob0));
+ }
+ emit_insn (pat);
+ return gen_rtx_fmt_ee (code, cmode, cc_reg, const0_rtx);
+}
+
+/* The ROUND_ADVANCE* macros are local to this file. */
+/* Round SIZE (in bytes) up to a word boundary; the result is a count
+   of words, not bytes. */
+#define ROUND_ADVANCE(SIZE) \
+ (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Round arg MODE/TYPE up to the next word boundary.
+   BLKmode has no meaningful mode size, so fall back to the type's
+   size in bytes. */
+#define ROUND_ADVANCE_ARG(MODE, TYPE) \
+ ((MODE) == BLKmode \
+ ? ROUND_ADVANCE (int_size_in_bytes (TYPE)) \
+ : ROUND_ADVANCE (GET_MODE_SIZE (MODE)))
+
+/* Round CUM up to the necessary point for argument MODE/TYPE.
+   Arguments that require more than word alignment start at an even
+   word index. */
+#define ROUND_ADVANCE_CUM(CUM, MODE, TYPE) \
+ (epiphany_function_arg_boundary ((MODE), (TYPE)) > BITS_PER_WORD \
+ ? (((CUM) + 1) & ~1) \
+ : (CUM))
+
+/* Return the argument alignment in bits for an argument of MODE/TYPE:
+   PARM_BOUNDARY for word-or-less alignment, twice that otherwise.
+   When TYPE is null, the mode's bit size decides. */
+static unsigned int
+epiphany_function_arg_boundary (enum machine_mode mode, const_tree type)
+{
+ unsigned int align = type ? TYPE_ALIGN (type) : GET_MODE_BITSIZE (mode);
+
+ return align > PARM_BOUNDARY ? 2 * PARM_BOUNDARY : PARM_BOUNDARY;
+}
+
+/* Do any needed setup for a variadic function. For the EPIPHANY, we
+   actually emit the code in epiphany_expand_prologue.
+
+   CUM has not been updated for the last named argument which has type TYPE
+   and mode MODE, and we rely on this fact. */
+
+
+static void
+epiphany_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
+ tree type, int *pretend_size, int no_rtl)
+{
+ int first_anon_arg;
+ CUMULATIVE_ARGS next_cum;
+ machine_function_t *mf = MACHINE_FUNCTION (cfun);
+
+ /* All BLKmode values are passed by reference. */
+ gcc_assert (mode != BLKmode);
+
+ /* Advance past the last named argument: align CUM for MODE/TYPE, then
+    skip the argument itself.  The result is the word index of the first
+    anonymous argument. */
+ next_cum = *get_cumulative_args (cum);
+ next_cum
+ = ROUND_ADVANCE_CUM (next_cum, mode, type) + ROUND_ADVANCE_ARG (mode, type);
+ first_anon_arg = next_cum;
+
+ if (first_anon_arg < MAX_EPIPHANY_PARM_REGS && !no_rtl)
+ {
+ /* Note that first_reg_offset < MAX_EPIPHANY_PARM_REGS. */
+ int first_reg_offset = first_anon_arg;
+
+ /* Bytes needed to dump the remaining parameter registers to stack. */
+ *pretend_size = ((MAX_EPIPHANY_PARM_REGS - first_reg_offset)
+ * UNITS_PER_WORD);
+ }
+ /* Record layout facts for later frame computation: whether the
+    pretend-args area spans an odd number of words. */
+ mf->args_parsed = 1;
+ mf->pretend_args_odd = ((*pretend_size & UNITS_PER_WORD) ? 1 : 0);
+}
+
+/* Return the number of bytes of an argument of MODE/TYPE that are passed
+   in registers when the argument is split between registers and the
+   stack; 0 when it fits entirely in one or the other.
+   (Presumably the TARGET_ARG_PARTIAL_BYTES worker -- confirm against
+   the target hook table.) */
+
+static int
+epiphany_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
+ tree type, bool named ATTRIBUTE_UNUSED)
+{
+ int words = 0, rounded_cum;
+
+ /* Partial passing only makes sense for by-value arguments. */
+ gcc_assert (!epiphany_pass_by_reference (cum, mode, type, /* named */ true));
+
+ rounded_cum = ROUND_ADVANCE_CUM (*get_cumulative_args (cum), mode, type);
+ if (rounded_cum < MAX_EPIPHANY_PARM_REGS)
+ {
+ /* Parameter registers still available; if the whole argument fits
+    in them, nothing is partial. */
+ words = MAX_EPIPHANY_PARM_REGS - rounded_cum;
+ if (words >= ROUND_ADVANCE_ARG (mode, type))
+ words = 0;
+ }
+ return words * UNITS_PER_WORD;
+}
+
+/* Cost functions. */
+
+/* Compute a (partial) cost for rtx X. Return true if the complete
+   cost has been computed, and false if subexpressions should be
+   scanned. In either case, *TOTAL contains the cost result. */
+
+static bool
+epiphany_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
+ int *total, bool speed ATTRIBUTE_UNUSED)
+{
+ switch (code)
+ {
+ /* Small integers in the right context are as cheap as registers. */
+ case CONST_INT:
+ if ((outer_code == PLUS || outer_code == MINUS)
+ && SIMM11 (INTVAL (x)))
+ {
+ *total = 0;
+ return true;
+ }
+ if (IMM16 (INTVAL (x)))
+ {
+ /* Free as a mov immediate, one extra insn anywhere else. */
+ *total = outer_code == SET ? 0 : COSTS_N_INSNS (1);
+ return true;
+ }
+ /* FALLTHRU */
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ /* Large constants / addresses cost an extra insn to materialize,
+    plus another one if they are not simply the source of a SET. */
+ *total = COSTS_N_INSNS ((epiphany_small16 (x) ? 0 : 1)
+ + (outer_code == SET ? 0 : 1));
+ return true;
+
+ case CONST_DOUBLE:
+ {
+ /* One insn per half that doesn't fit a 16-bit immediate. */
+ rtx high, low;
+ split_double (x, &high, &low);
+ *total = COSTS_N_INSNS (!IMM16 (INTVAL (high))
+ + !IMM16 (INTVAL (low)));
+ return true;
+ }
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ *total = COSTS_N_INSNS (1);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+
+/* Provide the costs of an addressing mode that contains ADDR.
+ If ADDR is not a valid address, its cost is irrelevant. */
+
+static int
+epiphany_address_cost (rtx addr, bool speed)
+{
+ rtx reg;
+ rtx off = const0_rtx;
+ int i;
+
+ if (speed)
+ return 0;
+ /* Return 0 for addresses valid in short insns, 1 for addresses only valid
+ in long insns. */
+ switch (GET_CODE (addr))
+ {
+ case PLUS :
+ reg = XEXP (addr, 0);
+ off = XEXP (addr, 1);
+ break;
+ case POST_MODIFY:
+ reg = XEXP (addr, 0);
+ off = XEXP (addr, 1);
+ gcc_assert (GET_CODE (off) == PLUS && rtx_equal_p (reg, XEXP (off, 0)));
+ off = XEXP (off, 1);
+ if (satisfies_constraint_Rgs (reg) && satisfies_constraint_Rgs (off))
+ return 0;
+ return 1;
+ case REG:
+ default:
+ reg = addr;
+ break;
+ }
+ if (!satisfies_constraint_Rgs (reg))
+ return 1;
+ /* ??? We don't know the mode of the memory access. We are going to assume
+ SImode, unless lack of offset alignment indicates a smaller access. */
+ /* First, make sure we have a valid integer. */
+ if (!satisfies_constraint_L (off))
+ return 1;
+ i = INTVAL (off);
+ if ((i & 1) == 0)
+ i >>= 1;
+ if ((i & 1) == 0)
+ i >>= 1;
+ if (i < -7 || i > 7)
+ return 1;
+ return 0;
+}
+
+/* Compute the cost of moving data between registers and memory.
+   For integer, load latency is twice as long as register-register moves,
+   but issue pitch is the same. For floating point, load latency is three
+   times as much as a reg-reg move. */
+static int
+epiphany_memory_move_cost (enum machine_mode mode,
+ reg_class_t rclass ATTRIBUTE_UNUSED,
+ bool in ATTRIBUTE_UNUSED)
+{
+ /* Register class and transfer direction are irrelevant here; only
+    integer vs. non-integer mode matters. */
+ return GET_MODE_CLASS (mode) == MODE_INT ? 3 : 4;
+}
+
+/* Function prologue/epilogue handlers. */
+
+/* EPIPHANY stack frames look like:
+
+ Before call After call
+ +-----------------------+ +-----------------------+
+ | | | |
+ high | local variables, | | local variables, |
+ mem | reg save area, etc. | | reg save area, etc. |
+ | | | |
+ +-----------------------+ +-----------------------+
+ | | | |
+ | arguments on stack. | | arguments on stack. |
+ | | | |
+ SP+8->+-----------------------+FP+8m->+-----------------------+
+ | 2 word save area for | | reg parm save area, |
+ | leaf funcs / flags | | only created for |
+ SP+0->+-----------------------+ | variable argument |
+ | functions |
+ FP+8n->+-----------------------+
+ | |
+ | register save area |
+ | |
+ +-----------------------+
+ | |
+ | local variables |
+ | |
+ FP+0->+-----------------------+
+ | |
+ | alloca allocations |
+ | |
+ +-----------------------+
+ | |
+ | arguments on stack |
+ | |
+ SP+8->+-----------------------+
+ low | 2 word save area for |
+ memory | leaf funcs / flags |
+ SP+0->+-----------------------+
+
+Notes:
+1) The "reg parm save area" does not exist for non variable argument fns.
+ The "reg parm save area" could be eliminated if we created our
+ own TARGET_GIMPLIFY_VA_ARG_EXPR, but that has tradeoffs as well
+ (so it's not done). */
+
+/* Structure to be filled in by epiphany_compute_frame_size with register
+   save masks, and offsets for the current function. */
+struct epiphany_frame_info
+{
+ unsigned int total_size; /* # bytes that the entire frame takes up. */
+ unsigned int pretend_size; /* # bytes we push and pretend caller did. */
+ unsigned int args_size; /* # bytes that outgoing arguments take up. */
+ unsigned int reg_size; /* # bytes needed to store regs. */
+ unsigned int var_size; /* # bytes that variables take up. */
+ HARD_REG_SET gmask; /* Set of saved gp registers. */
+ int initialized; /* Nonzero if frame size already calculated. */
+ int stld_sz; /* Current load/store data size for offset
+ adjustment. */
+ int need_fp; /* Value to override "frame_pointer_needed". */
+ /* Registers (or -1) whose save is combined with the first / last stack
+    adjustment, and the byte size of those two adjustments.
+    NOTE(review): inferred from epiphany_compute_frame_size and
+    epiphany_expand_prologue usage -- confirm. */
+ int first_slot, last_slot, first_slot_offset, last_slot_offset;
+ int first_slot_size; /* Size of the first-slot save (word or dword). */
+ int small_threshold; /* Register number bound for saves emitted before
+ the main stack adjustment. */
+};
+
+/* Current frame information calculated by epiphany_compute_frame_size. */
+static struct epiphany_frame_info current_frame_info;
+
+/* Zero structure to initialize current_frame_info. */
+static struct epiphany_frame_info zero_frame_info;
+
+/* The usual; we set up our machine_function data.
+   Also resets the per-function frame information cache. */
+static struct machine_function *
+epiphany_init_machine_status (void)
+{
+ struct machine_function *machine;
+
+ /* Reset state info for each function. */
+ current_frame_info = zero_frame_info;
+
+ machine = ggc_alloc_cleared_machine_function_t ();
+
+ return machine;
+}
+
+/* Implements INIT_EXPANDERS.  We just set up to call the above
+   function, which is invoked whenever a new function's machine
+   status is needed. */
+void
+epiphany_init_expanders (void)
+{
+ init_machine_status = epiphany_init_machine_status;
+}
+
+/* Type of function DECL.
+
+   The result is cached. To reset the cache at the end of a function,
+   call with DECL = NULL_TREE.
+
+   The type is derived from the "interrupt" attribute: its single string
+   argument selects which interrupt the function handles; an unknown
+   string aborts compilation (gcc_unreachable, assuming the attribute
+   was validated earlier). */
+
+static enum epiphany_function_type
+epiphany_compute_function_type (tree decl)
+{
+ tree a;
+ /* Cached value. */
+ static enum epiphany_function_type fn_type = EPIPHANY_FUNCTION_UNKNOWN;
+ /* Last function we were called for. */
+ static tree last_fn = NULL_TREE;
+
+ /* Resetting the cached value? */
+ if (decl == NULL_TREE)
+ {
+ fn_type = EPIPHANY_FUNCTION_UNKNOWN;
+ last_fn = NULL_TREE;
+ return fn_type;
+ }
+
+ if (decl == last_fn && fn_type != EPIPHANY_FUNCTION_UNKNOWN)
+ return fn_type;
+
+ /* Assume we have a normal function (not an interrupt handler). */
+ fn_type = EPIPHANY_FUNCTION_NORMAL;
+
+ /* Now see if this is an interrupt handler. */
+ for (a = DECL_ATTRIBUTES (decl);
+ a;
+ a = TREE_CHAIN (a))
+ {
+ tree name = TREE_PURPOSE (a), args = TREE_VALUE (a);
+
+ if (name == get_identifier ("interrupt")
+ && list_length (args) == 1
+ && TREE_CODE (TREE_VALUE (args)) == STRING_CST)
+ {
+ tree value = TREE_VALUE (args);
+
+ if (!strcmp (TREE_STRING_POINTER (value), "reset"))
+ fn_type = EPIPHANY_FUNCTION_RESET;
+ else if (!strcmp (TREE_STRING_POINTER (value), "software_exception"))
+ fn_type = EPIPHANY_FUNCTION_SOFTWARE_EXCEPTION;
+ else if (!strcmp (TREE_STRING_POINTER (value), "timer"))
+ fn_type = EPIPHANY_FUNCTION_TIMER;
+ else if (!strcmp (TREE_STRING_POINTER (value), "dma0"))
+ fn_type = EPIPHANY_FUNCTION_DMA0;
+ else if (!strcmp (TREE_STRING_POINTER (value), "dma1"))
+ fn_type = EPIPHANY_FUNCTION_DMA1;
+ else if (!strcmp (TREE_STRING_POINTER (value), "static_flag"))
+ fn_type = EPIPHANY_FUNCTION_STATIC_FLAG;
+ else if (!strcmp (TREE_STRING_POINTER (value), "swi"))
+ fn_type = EPIPHANY_FUNCTION_SWI;
+ else
+ gcc_unreachable ();
+ break;
+ }
+ }
+
+ last_fn = decl;
+ return fn_type;
+}
+
+#define RETURN_ADDR_REGNUM GPR_LR
+#define FRAME_POINTER_MASK (1 << (FRAME_POINTER_REGNUM))
+#define RETURN_ADDR_MASK (1 << (RETURN_ADDR_REGNUM))
+
+/* Tell prologue and epilogue if register REGNO should be saved / restored.
+   The return address and frame pointer are treated separately.
+   Don't consider them here.
+   Interrupt handlers additionally save call-clobbered registers that a
+   non-leaf function might clobber (everything except the stack pointer). */
+#define MUST_SAVE_REGISTER(regno, interrupt_p) \
+ ((df_regs_ever_live_p (regno) \
+ || (interrupt_p && !current_function_is_leaf \
+ && call_used_regs[regno] && !fixed_regs[regno])) \
+ && (!call_used_regs[regno] || regno == GPR_LR \
+ || (interrupt_p && regno != GPR_SP)))
+
+#define MUST_SAVE_RETURN_ADDR 0
+
+/* Return the bytes needed to compute the frame pointer from the current
+   stack pointer.
+
+   SIZE is the size needed for local variables.
+
+   Fills in current_frame_info (save mask, slot assignments, offsets) as
+   a side effect, and records the link-register slot offset in the
+   per-function machine data. */
+
+static unsigned int
+epiphany_compute_frame_size (int size /* # of var. bytes allocated. */)
+{
+ int regno;
+ unsigned int total_size, var_size, args_size, pretend_size, reg_size;
+ HARD_REG_SET gmask;
+ enum epiphany_function_type fn_type;
+ int interrupt_p;
+ int first_slot, last_slot, first_slot_offset, last_slot_offset;
+ int first_slot_size;
+ int small_slots = 0;
+ long lr_slot_offset;
+
+ var_size = size;
+ args_size = crtl->outgoing_args_size;
+ pretend_size = crtl->args.pretend_args_size;
+ total_size = args_size + var_size;
+ reg_size = 0;
+ CLEAR_HARD_REG_SET (gmask);
+ first_slot = -1;
+ first_slot_offset = 0;
+ last_slot = -1;
+ last_slot_offset = 0;
+ first_slot_size = UNITS_PER_WORD;
+
+ /* See if this is an interrupt handler. Call used registers must be saved
+    for them too. */
+ fn_type = epiphany_compute_function_type (current_function_decl);
+ interrupt_p = EPIPHANY_INTERRUPT_P (fn_type);
+
+ /* Calculate space needed for registers. */
+
+ /* Mark the parameter registers that must be dumped for varargs,
+    working down from the highest-numbered one. */
+ for (regno = MAX_EPIPHANY_PARM_REGS - 1; pretend_size > reg_size; regno--)
+ {
+ reg_size += UNITS_PER_WORD;
+ SET_HARD_REG_BIT (gmask, regno);
+ if (epiphany_stack_offset - reg_size == 0)
+ first_slot = regno;
+ }
+
+ if (interrupt_p)
+ reg_size += 2 * UNITS_PER_WORD;
+ else
+ small_slots = epiphany_stack_offset / UNITS_PER_WORD;
+
+ if (frame_pointer_needed)
+ {
+ current_frame_info.need_fp = 1;
+ if (!interrupt_p && first_slot < 0)
+ first_slot = GPR_FP;
+ }
+ else
+ current_frame_info.need_fp = 0;
+ for (regno = 0; regno <= GPR_LAST; regno++)
+ {
+ if (MUST_SAVE_REGISTER (regno, interrupt_p))
+ {
+ gcc_assert (!TEST_HARD_REG_BIT (gmask, regno));
+ reg_size += UNITS_PER_WORD;
+ SET_HARD_REG_BIT (gmask, regno);
+ /* FIXME: when optimizing for speed, take scheduling into account
+    when selecting these registers. */
+ if (regno == first_slot)
+ gcc_assert (regno == GPR_FP && frame_pointer_needed);
+ else if (!interrupt_p && first_slot < 0)
+ first_slot = regno;
+ else if (last_slot < 0
+ && (first_slot ^ regno) != 1
+ && (!interrupt_p || regno > GPR_0 + 1))
+ last_slot = regno;
+ }
+ }
+ if (TEST_HARD_REG_BIT (gmask, GPR_LR))
+ MACHINE_FUNCTION (cfun)->lr_clobbered = 1;
+ /* ??? Could sometimes do better than that. */
+ current_frame_info.small_threshold
+ = (optimize >= 3 || interrupt_p ? 0
+ : pretend_size ? small_slots
+ : 4 + small_slots - (first_slot == GPR_FP));
+
+ /* If there might be variables with 64-bit alignment requirement, align the
+    start of the variables. */
+ if (var_size >= 2 * UNITS_PER_WORD
+ /* We don't want to split a double reg save/restore across two unpaired
+    stack slots when optimizing. This rounding could be avoided with
+    more complex reordering of the register saves, but that would seem
+    to be a lot of code complexity for little gain. */
+ || (reg_size > 8 && optimize))
+ reg_size = EPIPHANY_STACK_ALIGN (reg_size);
+ /* Leaf functions with a small footprint can live entirely in the
+    red-zone-like area below epiphany_stack_offset. */
+ if (total_size + reg_size <= (unsigned) epiphany_stack_offset
+ && !interrupt_p
+ && current_function_is_leaf && !frame_pointer_needed)
+ {
+ first_slot = -1;
+ last_slot = -1;
+ goto alloc_done;
+ }
+ else if (reg_size
+ && !interrupt_p
+ && reg_size < (unsigned HOST_WIDE_INT) epiphany_stack_offset)
+ reg_size = epiphany_stack_offset;
+ if (interrupt_p)
+ {
+ if (total_size + reg_size < 0x3fc)
+ {
+ first_slot_offset = EPIPHANY_STACK_ALIGN (total_size + reg_size);
+ first_slot_offset += EPIPHANY_STACK_ALIGN (epiphany_stack_offset);
+ last_slot = -1;
+ }
+ else
+ {
+ first_slot_offset = EPIPHANY_STACK_ALIGN (reg_size);
+ last_slot_offset = EPIPHANY_STACK_ALIGN (total_size);
+ last_slot_offset += EPIPHANY_STACK_ALIGN (epiphany_stack_offset);
+ if (last_slot >= 0)
+ CLEAR_HARD_REG_BIT (gmask, last_slot);
+ }
+ }
+ else if (total_size + reg_size < 0x1ffc && first_slot >= 0)
+ {
+ first_slot_offset = EPIPHANY_STACK_ALIGN (total_size + reg_size);
+ last_slot = -1;
+ }
+ else
+ {
+ if (total_size + reg_size <= (unsigned) epiphany_stack_offset)
+ {
+ gcc_assert (first_slot < 0);
+ gcc_assert (reg_size == 0);
+ last_slot_offset = EPIPHANY_STACK_ALIGN (total_size + reg_size);
+ }
+ else
+ {
+ first_slot_offset
+ = (reg_size
+ ? EPIPHANY_STACK_ALIGN (reg_size - epiphany_stack_offset) : 0);
+ if (!first_slot_offset)
+ {
+ if (first_slot != GPR_FP || !current_frame_info.need_fp)
+ last_slot = first_slot;
+ first_slot = -1;
+ }
+ last_slot_offset = EPIPHANY_STACK_ALIGN (total_size);
+ if (reg_size)
+ last_slot_offset += EPIPHANY_STACK_ALIGN (epiphany_stack_offset);
+ }
+ if (last_slot >= 0)
+ CLEAR_HARD_REG_BIT (gmask, last_slot);
+ }
+ alloc_done:
+ if (first_slot >= 0)
+ {
+ CLEAR_HARD_REG_BIT (gmask, first_slot);
+ /* Pair the first slot with its neighbour so that a double-word
+    store can be used, if room permits. */
+ if (TEST_HARD_REG_BIT (gmask, first_slot ^ 1)
+ && epiphany_stack_offset - pretend_size >= 2 * UNITS_PER_WORD)
+ {
+ CLEAR_HARD_REG_BIT (gmask, first_slot ^ 1);
+ first_slot_size = 2 * UNITS_PER_WORD;
+ first_slot &= ~1;
+ }
+ }
+ total_size = first_slot_offset + last_slot_offset;
+
+ /* Walk the save order to locate the link register's slot offset. */
+ lr_slot_offset
+ = (frame_pointer_needed ? first_slot_offset : (long) total_size);
+ if (first_slot != GPR_LR)
+ {
+ int stack_offset = epiphany_stack_offset - UNITS_PER_WORD;
+
+ for (regno = 0; ; regno++)
+ {
+ if (stack_offset + UNITS_PER_WORD - first_slot_size == 0
+ && first_slot >= 0)
+ {
+ stack_offset -= first_slot_size;
+ regno--;
+ }
+ else if (regno == GPR_LR)
+ break;
+ /* The original wrote "else if TEST_HARD_REG_BIT (...)", relying
+    on the macro expansion starting with a parenthesis; make the
+    required parentheses explicit. */
+ else if (TEST_HARD_REG_BIT (gmask, regno))
+ stack_offset -= UNITS_PER_WORD;
+ }
+ lr_slot_offset += stack_offset;
+ }
+
+ /* Save computed information. */
+ current_frame_info.total_size = total_size;
+ current_frame_info.pretend_size = pretend_size;
+ current_frame_info.var_size = var_size;
+ current_frame_info.args_size = args_size;
+ current_frame_info.reg_size = reg_size;
+ COPY_HARD_REG_SET (current_frame_info.gmask, gmask);
+ current_frame_info.first_slot = first_slot;
+ current_frame_info.last_slot = last_slot;
+ current_frame_info.first_slot_offset = first_slot_offset;
+ current_frame_info.first_slot_size = first_slot_size;
+ current_frame_info.last_slot_offset = last_slot_offset;
+ MACHINE_FUNCTION (cfun)->lr_slot_offset = lr_slot_offset;
+
+ current_frame_info.initialized = reload_completed;
+
+ /* Ok, we're done. */
+ return total_size;
+}
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+   For `%' followed by punctuation, CODE is the punctuation and X is null. */
+
+static void
+epiphany_print_operand (FILE *file, rtx x, int code)
+{
+ switch (code)
+ {
+ case 'd':
+ /* Condition code of a comparison operand. */
+ fputs (epiphany_condition_codes[get_epiphany_condition_code (x)], file);
+ return;
+ case 'D':
+ /* Inverse condition code of a comparison operand. */
+ fputs (epiphany_condition_codes[EPIPHANY_INVERSE_CONDITION_CODE
+ (get_epiphany_condition_code (x))],
+ file);
+ return;
+
+ /* 'X' / 'C' / 'c' set the data size used to scale subsequent
+    load/store offsets (see the CONST_INT case below and
+    epiphany_print_operand_address), then print the operand. */
+ case 'X':
+ current_frame_info.stld_sz = 8;
+ break;
+
+ case 'C' :
+ current_frame_info.stld_sz = 4;
+ break;
+
+ case 'c' :
+ current_frame_info.stld_sz = 2;
+ break;
+
+ case 'f':
+ /* Call mnemonic: register-indirect vs. direct branch-and-link. */
+ fputs (REG_P (x) ? "jalr " : "bl ", file);
+ break;
+
+ case '-':
+ /* The register that holds constant -1. */
+ fprintf (file, "r%d", epiphany_m1reg);
+ return;
+
+ case 0 :
+ /* Do nothing special. */
+ break;
+ default :
+ /* Unknown flag. */
+ output_operand_lossage ("invalid operand output code");
+ }
+
+ switch (GET_CODE (x))
+ {
+ rtx addr;
+ rtx offset;
+
+ case REG :
+ fputs (reg_names[REGNO (x)], file);
+ break;
+ case MEM :
+ if (code == 0)
+ current_frame_info.stld_sz = 1;
+ fputc ('[', file);
+ addr = XEXP (x, 0);
+ /* Peel off any post-modification; the update amount is printed
+    separately after the closing bracket. */
+ switch (GET_CODE (addr))
+ {
+ case POST_INC:
+ offset = GEN_INT (GET_MODE_SIZE (GET_MODE (x)));
+ addr = XEXP (addr, 0);
+ break;
+ case POST_DEC:
+ offset = GEN_INT (-GET_MODE_SIZE (GET_MODE (x)));
+ addr = XEXP (addr, 0);
+ break;
+ case POST_MODIFY:
+ offset = XEXP (XEXP (addr, 1), 1);
+ addr = XEXP (addr, 0);
+ break;
+ default:
+ offset = 0;
+ break;
+ }
+ output_address (addr);
+ fputc (']', file);
+ if (offset)
+ {
+ fputc (',', file);
+ /* Constant post-increments are encoded scaled by the access
+    size. */
+ if (CONST_INT_P (offset)) switch (GET_MODE_SIZE (GET_MODE (x)))
+ {
+ default:
+ gcc_unreachable ();
+ case 8:
+ offset = GEN_INT (INTVAL (offset) >> 3);
+ break;
+ case 4:
+ offset = GEN_INT (INTVAL (offset) >> 2);
+ break;
+ case 2:
+ offset = GEN_INT (INTVAL (offset) >> 1);
+ break;
+ case 1:
+ break;
+ }
+ output_address (offset);
+ }
+ break;
+ case CONST_DOUBLE :
+ /* We handle SFmode constants here as output_addr_const doesn't. */
+ if (GET_MODE (x) == SFmode)
+ {
+ REAL_VALUE_TYPE d;
+ long l;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+ REAL_VALUE_TO_TARGET_SINGLE (d, l);
+ fprintf (file, "%s0x%08lx", IMMEDIATE_PREFIX, l);
+ break;
+ }
+ /* Fall through. Let output_addr_const deal with it. */
+ case CONST_INT:
+ fprintf(file,"%s",IMMEDIATE_PREFIX);
+ if (code == 'C' || code == 'X')
+ {
+ /* Scaled offset, using the size set by the modifier letter. */
+ fprintf (file, "%ld",
+ (long) (INTVAL (x) / current_frame_info.stld_sz));
+ break;
+ }
+ /* Fall through */
+ default :
+ output_addr_const (file, x);
+ break;
+ }
+}
+
+/* Print a memory address as an operand to reference that memory location. */
+
+static void
+epiphany_print_operand_address (FILE *file, rtx addr)
+{
+ register rtx base, index = 0;
+ int offset = 0;
+
+ switch (GET_CODE (addr))
+ {
+ case REG :
+ fputs (reg_names[REGNO (addr)], file);
+ break;
+ case SYMBOL_REF :
+ /* Both branches are currently identical; the disabled one is a
+    placeholder for possibly treating function symbols specially. */
+ if (/*???*/ 0 && SYMBOL_REF_FUNCTION_P (addr))
+ {
+ output_addr_const (file, addr);
+ }
+ else
+ {
+ output_addr_const (file, addr);
+ }
+ break;
+ case PLUS :
+ /* Split into base plus either a constant offset, a register index,
+    or a symbolic index. */
+ if (GET_CODE (XEXP (addr, 0)) == CONST_INT)
+ offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
+ else if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
+ else
+ base = XEXP (addr, 0), index = XEXP (addr, 1);
+ gcc_assert (GET_CODE (base) == REG);
+ fputs (reg_names[REGNO (base)], file);
+ if (index == 0)
+ {
+ /*
+ ** ++rk quirky method to scale offset for ld/str.......
+ ** (The encoded offset is the byte offset divided by the access
+ ** size most recently recorded in current_frame_info.stld_sz.)
+ */
+ fprintf (file, ",%s%d", IMMEDIATE_PREFIX,
+ offset/current_frame_info.stld_sz);
+ }
+ else
+ {
+ switch (GET_CODE (index))
+ {
+ case REG:
+ fprintf (file, ",%s", reg_names[REGNO (index)]);
+ break;
+ case SYMBOL_REF:
+ fputc (',', file), output_addr_const (file, index);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ break;
+ case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC: case POST_MODIFY:
+ /* We shouldn't get here as we've lost the mode of the memory object
+    (which says how much to inc/dec by). */
+ gcc_unreachable ();
+ break;
+ default:
+ output_addr_const (file, addr);
+ break;
+ }
+}
+
+/* Called by final before each instruction is output; emits the number
+   of filler nops given by epiphany_n_nops ahead of every insn.
+   The unused local `pat' from the original has been removed. */
+void
+epiphany_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
+ rtx *opvec ATTRIBUTE_UNUSED,
+ int noperands ATTRIBUTE_UNUSED)
+{
+ int i = epiphany_n_nops;
+
+ while (i--)
+ fputs ("\tnop\n", asm_out_file);
+}
+
+
+/* Worker function for TARGET_RETURN_IN_MEMORY.
+   Return true for aggregates that have BLK mode or need construction,
+   for values of unknown size, and for anything wider than 8 bytes. */
+
+static bool
+epiphany_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+
+ return ((AGGREGATE_TYPE_P (type)
+ && (TYPE_MODE (type) == BLKmode || TYPE_NEEDS_CONSTRUCTING (type)))
+ || size == -1
+ || size > 8);
+}
+
+/* For EPIPHANY, all aggregates and arguments greater than 8 bytes are
+   passed by reference.  (Only the aggregate test appears here; size-based
+   decisions are made elsewhere.) */
+
+static bool
+epiphany_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
+ enum machine_mode mode, const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ return (type != NULL_TREE
+ && AGGREGATE_TYPE_P (type)
+ && (mode == BLKmode || TYPE_NEEDS_CONSTRUCTING (type)));
+}
+
+
+/* Return the rtx for a function's return value of type RET_TYPE.
+   Scalar integer values narrower than a word are promoted to SImode,
+   mirroring PROMOTE_MODE; everything is returned in register 0. */
+static rtx
+epiphany_function_value (const_tree ret_type,
+ const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ enum machine_mode mode;
+
+ mode = TYPE_MODE (ret_type);
+ /* We must change the mode like PROMOTE_MODE does.
+    ??? PROMOTE_MODE is ignored for non-scalar types.
+    The set of types tested here has to be kept in sync
+    with the one in explow.c:promote_mode. */
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) < 4
+ && (TREE_CODE (ret_type) == INTEGER_TYPE
+ || TREE_CODE (ret_type) == ENUMERAL_TYPE
+ || TREE_CODE (ret_type) == BOOLEAN_TYPE
+ || TREE_CODE (ret_type) == OFFSET_TYPE))
+ mode = SImode;
+ return gen_rtx_REG (mode, 0);
+}
+
+/* Library call return values also come back in register 0, in MODE. */
+static rtx
+epiphany_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (mode, 0);
+}
+
+/* Return true if REGNO can hold a function return value; only
+   register 0 qualifies. */
+bool
+epiphany_function_value_regno_p (const unsigned int regno ATTRIBUTE_UNUSED)
+{
+ return regno == 0;
+}
+
+/* Fix up invalid option settings.
+   Validates -mstack-offset (must be a multiple of 4, at least 4) and
+   performs one-time target initialization. */
+static void
+epiphany_override_options (void)
+{
+ if (epiphany_stack_offset < 4)
+ error ("stack_offset must be at least 4");
+ if (epiphany_stack_offset & 3)
+ error ("stack_offset must be a multiple of 4");
+ /* Round up anyway so later code can rely on the alignment even after
+    an error was diagnosed. */
+ epiphany_stack_offset = (epiphany_stack_offset + 3) & -4;
+
+ /* This needs to be done at start up. It's convenient to do it here. */
+ epiphany_init ();
+}
+
+/* For a DImode load / store SET, make a SImode set for a
+   REG_FRAME_RELATED_EXPR note, using OFFSET to create a high or lowpart
+   subreg. */
+static rtx
+frame_subreg_note (rtx set, int offset)
+{
+ rtx src = simplify_gen_subreg (SImode, SET_SRC (set), DImode, offset);
+ rtx dst = simplify_gen_subreg (SImode, SET_DEST (set), DImode, offset);
+
+ set = gen_rtx_SET (VOIDmode, dst ,src);
+ /* Mark the new set so the DWARF CFI machinery records it. */
+ RTX_FRAME_RELATED_P (set) = 1;
+ return set;
+}
+
+/* Emit X as a frame-related insn.
+   DImode sets are decomposed into a pair of SImode sets attached as a
+   REG_FRAME_RELATED_EXPR note, since the unwinder works in word-sized
+   pieces.  Returns the emitted insn. */
+static rtx
+frame_insn (rtx x)
+{
+ int i;
+ rtx note = NULL_RTX;
+
+ if (GET_CODE (x) == PARALLEL)
+ {
+ rtx part = XVECEXP (x, 0, 0);
+
+ if (GET_MODE (SET_DEST (part)) == DImode)
+ {
+ /* Rewrite the DImode head set as two SImode sets in the note;
+    the remaining parallel elements are copied after them. */
+ note = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (XVECLEN (x, 0) + 1));
+ XVECEXP (note, 0, 0) = frame_subreg_note (part, 0);
+ XVECEXP (note, 0, 1) = frame_subreg_note (part, UNITS_PER_WORD);
+ for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
+ {
+ part = copy_rtx (XVECEXP (x, 0, i));
+
+ if (GET_CODE (part) == SET)
+ RTX_FRAME_RELATED_P (part) = 1;
+ XVECEXP (note, 0, i + 1) = part;
+ }
+ }
+ else
+ {
+ /* Word-sized parallel: just mark each contained set. */
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ {
+ part = XVECEXP (x, 0, i);
+
+ if (GET_CODE (part) == SET)
+ RTX_FRAME_RELATED_P (part) = 1;
+ }
+ }
+ }
+ else if (GET_CODE (x) == SET && GET_MODE (SET_DEST (x)) == DImode)
+ note = gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2, frame_subreg_note (x, 0),
+ frame_subreg_note (x, UNITS_PER_WORD)));
+ x = emit_insn (x);
+ RTX_FRAME_RELATED_P (x) = 1;
+ if (note)
+ add_reg_note (x, REG_FRAME_RELATED_EXPR, note);
+ return x;
+}
+
+/* Emit a frame-related move of FROM into TO via frame_insn. */
+static rtx
+frame_move_insn (rtx to, rtx from)
+{
+ return frame_insn (gen_rtx_SET (VOIDmode, to, from));
+}
+
+/* Generate a MEM referring to a varargs argument slot.
+   The MEM is marked non-trapping and placed in the varargs alias set
+   so it is kept distinct from ordinary frame accesses. */
+
+static rtx
+gen_varargs_mem (enum machine_mode mode, rtx addr)
+{
+ rtx mem = gen_rtx_MEM (mode, addr);
+ MEM_NOTRAP_P (mem) = 1;
+ set_mem_alias_set (mem, get_varargs_alias_set ());
+ return mem;
+}
+
+/* Emit instructions to save or restore registers in the range [MIN..LIMIT) .
+   If EPILOGUE_P is 0, save; if it is one, restore.
+   ADDR is the stack slot to save the first register to; subsequent
+   registers are written to lower addresses.
+   However, the order of register pairs can be reversed in order to
+   use double-word load-store instructions. Likewise, an unpaired single
+   word save slot can be skipped while double saves are carried out, and
+   reused when a single register is to be saved. */
+
+static void
+epiphany_emit_save_restore (int min, int limit, rtx addr, int epilogue_p)
+{
+ int i;
+ int stack_offset
+ = current_frame_info.first_slot >= 0 ? epiphany_stack_offset : 0;
+ rtx skipped_mem = NULL_RTX;
+ int last_saved = limit - 1;
+
+ /* Find the highest register actually saved, so the pairing logic below
+    knows when it is looking at the final save. */
+ if (!optimize)
+ while (last_saved >= 0
+ && !TEST_HARD_REG_BIT (current_frame_info.gmask, last_saved))
+ last_saved--;
+ for (i = 0; i < limit; i++)
+ {
+ enum machine_mode mode = word_mode;
+ rtx mem, reg;
+ int n = i;
+ rtx (*gen_mem) (enum machine_mode, rtx) = gen_frame_mem;
+
+ /* Make sure we push the arguments in the right order. */
+ if (n < MAX_EPIPHANY_PARM_REGS && crtl->args.pretend_args_size)
+ {
+ n = MAX_EPIPHANY_PARM_REGS - 1 - n;
+ gen_mem = gen_varargs_mem;
+ }
+ /* The first-slot register is stored/loaded jointly with the stack
+    adjustment, so only emit an explicit access when restoring. */
+ if (stack_offset == current_frame_info.first_slot_size
+ && current_frame_info.first_slot >= 0)
+ {
+ if (current_frame_info.first_slot_size > UNITS_PER_WORD)
+ {
+ mode = DImode;
+ addr = plus_constant (addr, - (HOST_WIDE_INT) UNITS_PER_WORD);
+ }
+ if (i-- < min || !epilogue_p)
+ goto next_slot;
+ n = current_frame_info.first_slot;
+ gen_mem = gen_frame_mem;
+ }
+ else if (n == UNKNOWN_REGNUM
+ && stack_offset > current_frame_info.first_slot_size)
+ {
+ i--;
+ goto next_slot;
+ }
+ else if (!TEST_HARD_REG_BIT (current_frame_info.gmask, n))
+ continue;
+ else if (i < min)
+ goto next_slot;
+
+ /* Check for a register pair to save. */
+ if (n == i
+ && (n >= MAX_EPIPHANY_PARM_REGS || crtl->args.pretend_args_size == 0)
+ && (n & 1) == 0 && n+1 < limit
+ && TEST_HARD_REG_BIT (current_frame_info.gmask, n+1))
+ {
+ /* If it fits in the current stack slot pair, place it there. */
+ if (GET_CODE (addr) == PLUS && (stack_offset & 7) == 0
+ && stack_offset != 2 * UNITS_PER_WORD
+ && (current_frame_info.last_slot < 0
+ || INTVAL (XEXP (addr, 1)) != UNITS_PER_WORD)
+ && (n+1 != last_saved || !skipped_mem))
+ {
+ mode = DImode;
+ i++;
+ addr = plus_constant (addr, - (HOST_WIDE_INT) UNITS_PER_WORD);
+ }
+ /* If it fits in the following stack slot pair, that's fine, too. */
+ else if (GET_CODE (addr) == PLUS && (stack_offset & 7) == 4
+ && stack_offset != 2 * UNITS_PER_WORD
+ && stack_offset != 3 * UNITS_PER_WORD
+ && (current_frame_info.last_slot < 0
+ || INTVAL (XEXP (addr, 1)) != 2 * UNITS_PER_WORD)
+ && n + 1 != last_saved)
+ {
+ /* Remember the skipped single-word slot; it is reused below
+    for the next unpaired register. */
+ gcc_assert (!skipped_mem);
+ stack_offset -= GET_MODE_SIZE (mode);
+ skipped_mem = gen_mem (mode, addr);
+ mode = DImode;
+ i++;
+ addr = plus_constant (addr, - (HOST_WIDE_INT) 2 * UNITS_PER_WORD);
+ }
+ }
+ reg = gen_rtx_REG (mode, n);
+ if (mode != DImode && skipped_mem)
+ mem = skipped_mem;
+ else
+ mem = gen_mem (mode, addr);
+ /* Saves are frame-related; restores of parameter registers for
+    varargs are skipped (the prologue dump is only needed once). */
+ if (!epilogue_p)
+ frame_move_insn (mem, reg);
+ else if (n >= MAX_EPIPHANY_PARM_REGS || !crtl->args.pretend_args_size)
+ emit_move_insn (reg, mem);
+ if (mem == skipped_mem)
+ {
+ skipped_mem = NULL_RTX;
+ continue;
+ }
+ next_slot:
+ addr = plus_constant (addr, - (HOST_WIDE_INT) UNITS_PER_WORD);
+ stack_offset -= GET_MODE_SIZE (mode);
+ }
+}
+
+/* Expand the function prologue: save registers, allocate the frame,
+   set up the frame pointer if needed, and materialize the CONFIG
+   register's initial value for mode switching.  Interrupt handlers
+   additionally save GPR_0/GPR_1, STATUS and IRET, and re-enable
+   interrupts unless the function is marked uninterruptible. */
+void
+epiphany_expand_prologue (void)
+{
+ int interrupt_p;
+ enum epiphany_function_type fn_type;
+ rtx addr, mem, off, reg;
+ rtx save_config;
+
+ if (!current_frame_info.initialized)
+ epiphany_compute_frame_size (get_frame_size ());
+
+ /* It is debatable if we should adjust this by epiphany_stack_offset. */
+ if (flag_stack_usage_info)
+ current_function_static_stack_size = current_frame_info.total_size;
+
+ fn_type = epiphany_compute_function_type (current_function_decl);
+ interrupt_p = EPIPHANY_INTERRUPT_P (fn_type);
+
+ if (interrupt_p)
+ {
+ /* Save GPR_0/GPR_1 below the stack pointer, then copy STATUS and
+    IRET into them so they survive nested interrupts. */
+ addr = plus_constant (stack_pointer_rtx,
+ - (HOST_WIDE_INT) 2 * UNITS_PER_WORD);
+ frame_move_insn (gen_frame_mem (DImode, addr),
+ gen_rtx_REG (DImode, GPR_0));
+ frame_move_insn (gen_rtx_REG (SImode, GPR_0),
+ gen_rtx_REG (word_mode, STATUS_REGNUM));
+ frame_move_insn (gen_rtx_REG (SImode, GPR_0+1),
+ gen_rtx_REG (word_mode, IRET_REGNUM));
+ mem = gen_frame_mem (BLKmode, stack_pointer_rtx);
+ off = GEN_INT (-current_frame_info.first_slot_offset);
+ frame_insn (gen_stack_adjust_add (off, mem));
+ if (!epiphany_uninterruptible_p (current_function_decl))
+ emit_insn (gen_gie ());
+ addr = plus_constant (stack_pointer_rtx,
+ current_frame_info.first_slot_offset
+ - (HOST_WIDE_INT) 3 * UNITS_PER_WORD);
+ }
+ else
+ {
+ /* Save the low-numbered registers above the stack pointer first. */
+ addr = plus_constant (stack_pointer_rtx,
+ epiphany_stack_offset
+ - (HOST_WIDE_INT) UNITS_PER_WORD);
+ epiphany_emit_save_restore (0, current_frame_info.small_threshold,
+ addr, 0);
+ /* Allocate register save area; for small to medium size frames,
+    allocate the entire frame; this is joint with one register save. */
+ if (current_frame_info.first_slot >= 0)
+ {
+ enum machine_mode mode
+ = (current_frame_info.first_slot_size == UNITS_PER_WORD
+ ? word_mode : DImode);
+
+ off = GEN_INT (-current_frame_info.first_slot_offset);
+ mem = gen_frame_mem (BLKmode,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx, off));
+ frame_insn (gen_stack_adjust_str
+ (gen_frame_mem (mode, stack_pointer_rtx),
+ gen_rtx_REG (mode, current_frame_info.first_slot),
+ off, mem));
+ addr = plus_constant (addr, current_frame_info.first_slot_offset);
+ }
+ }
+ epiphany_emit_save_restore (current_frame_info.small_threshold,
+ FIRST_PSEUDO_REGISTER, addr, 0);
+ if (current_frame_info.need_fp)
+ frame_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
+ /* For large frames, allocate bulk of frame. This is usually joint with one
+    register save. */
+ if (current_frame_info.last_slot >= 0)
+ {
+ gcc_assert (current_frame_info.last_slot != GPR_FP
+ || (!current_frame_info.need_fp
+ && current_frame_info.first_slot < 0));
+ off = GEN_INT (-current_frame_info.last_slot_offset);
+ mem = gen_frame_mem (BLKmode,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx, off));
+ reg = gen_rtx_REG (Pmode, GPR_IP);
+ frame_move_insn (reg, off);
+ frame_insn (gen_stack_adjust_str
+ (gen_frame_mem (word_mode, stack_pointer_rtx),
+ gen_rtx_REG (word_mode, current_frame_info.last_slot),
+ reg, mem));
+ }
+ /* If there is only one or no register to save, yet we have a large frame,
+    use an add. */
+ else if (current_frame_info.last_slot_offset)
+ {
+ mem = gen_frame_mem (BLKmode,
+ plus_constant (stack_pointer_rtx,
+ current_frame_info.last_slot_offset));
+ off = GEN_INT (-current_frame_info.last_slot_offset);
+ if (!SIMM11 (INTVAL (off)))
+ {
+ /* Offset too large for an immediate add; go through GPR_IP. */
+ reg = gen_rtx_REG (Pmode, GPR_IP);
+ frame_move_insn (reg, off);
+ off = reg;
+ }
+ frame_insn (gen_stack_adjust_add (off, mem));
+ }
+
+ /* Mode switching uses get_hard_reg_initial_val after
+    emit_initial_value_sets, so we have to fix this up now. */
+ save_config = has_hard_reg_initial_val (SImode, CONFIG_REGNUM);
+ if (save_config)
+ {
+ if (REG_P (save_config))
+ {
+ if (REGNO (save_config) >= FIRST_PSEUDO_REGISTER)
+ gcc_assert (!df_regs_ever_live_p (REGNO (save_config)));
+ else
+ frame_move_insn (save_config,
+ get_hard_reg_initial_reg (save_config));
+ }
+ else
+ {
+ rtx save_dst = save_config;
+
+ reg = gen_rtx_REG (SImode, GPR_IP);
+ gcc_assert (MEM_P (save_dst));
+ if (!memory_operand (save_dst, SImode))
+ {
+ /* The spill slot address is out of range; rebuild it with the
+    offset materialized in GPR_16. */
+ rtx addr = XEXP (save_dst, 0);
+ rtx reg2 = gen_rtx_REG (SImode, GPR_16);
+
+ gcc_assert (GET_CODE (addr) == PLUS);
+ gcc_assert (XEXP (addr, 0) == hard_frame_pointer_rtx
+ || XEXP (addr, 0) == stack_pointer_rtx);
+ emit_move_insn (reg2, XEXP (addr, 1));
+ save_dst
+ = replace_equiv_address (save_dst,
+ gen_rtx_PLUS (Pmode, XEXP (addr, 0),
+ reg2));
+ }
+ emit_move_insn (reg, get_hard_reg_initial_reg (save_config));
+ emit_move_insn (save_dst, reg);
+ }
+ }
+}
+
+/* Expand the function epilogue.  SIBCALL_P is nonzero when the epilogue is
+   emitted for a sibling call, in which case no return jump is emitted.
+   This undoes epiphany_expand_prologue: deallocate the frame, restore the
+   call-saved registers, and, for interrupt functions, also restore STATUS
+   and IRET (and r0/r1) from their save slots before returning.  */
+void
+epiphany_expand_epilogue (int sibcall_p)
+{
+  int interrupt_p;
+  enum epiphany_function_type fn_type;
+  rtx mem, addr, reg, off;
+  HOST_WIDE_INT restore_offset;
+
+  fn_type = epiphany_compute_function_type( current_function_decl);
+  interrupt_p = EPIPHANY_INTERRUPT_P (fn_type);
+
+  /* For variable frames, deallocate bulk of frame.  */
+  if (current_frame_info.need_fp)
+    {
+      mem = gen_frame_mem (BLKmode, stack_pointer_rtx);
+      emit_insn (gen_stack_adjust_mov (mem));
+    }
+  /* Else for large static frames, deallocate bulk of frame.  */
+  else if (current_frame_info.last_slot_offset)
+    {
+      mem = gen_frame_mem (BLKmode, stack_pointer_rtx);
+      reg = gen_rtx_REG (Pmode, GPR_IP);
+      emit_move_insn (reg, GEN_INT (current_frame_info.last_slot_offset));
+      emit_insn (gen_stack_adjust_add (reg, mem));
+    }
+  /* Interrupt functions saved extra state three words below the normal
+     save area (see the prologue's first_slot_offset - 3 * UNITS_PER_WORD
+     address), so their restore area starts lower.  */
+  restore_offset = (interrupt_p
+                    ? - 3 * UNITS_PER_WORD
+                    : epiphany_stack_offset - (HOST_WIDE_INT) UNITS_PER_WORD);
+  addr = plus_constant (stack_pointer_rtx,
+                        (current_frame_info.first_slot_offset
+                         + restore_offset));
+  epiphany_emit_save_restore (current_frame_info.small_threshold,
+                              FIRST_PSEUDO_REGISTER, addr, 1);
+
+  if (interrupt_p && !epiphany_uninterruptible_p (current_function_decl))
+    emit_insn (gen_gid ());
+
+  off = GEN_INT (current_frame_info.first_slot_offset);
+  mem = gen_frame_mem (BLKmode, stack_pointer_rtx);
+  /* For large / variable size frames, deallocating the register save area is
+     joint with one register restore; for medium size frames, we use a
+     dummy post-increment load to deallocate the whole frame.  */
+  if (!SIMM11 (INTVAL (off)) || current_frame_info.last_slot >= 0)
+    {
+      emit_insn (gen_stack_adjust_ldr
+                  (gen_rtx_REG (word_mode,
+                                (current_frame_info.last_slot >= 0
+                                 ? current_frame_info.last_slot : GPR_IP)),
+                   gen_frame_mem (word_mode, stack_pointer_rtx),
+                   off,
+                   mem));
+    }
+  /* While for small frames, we deallocate the entire frame with one add.  */
+  else if (INTVAL (off))
+    {
+      emit_insn (gen_stack_adjust_add (off, mem));
+    }
+  if (interrupt_p)
+    {
+      /* Restore the interrupt context: STATUS and IRET were saved through
+         r0 / r1 in the prologue; move them back, then reload r0/r1
+         themselves from their DImode slot just below the stack pointer.  */
+      frame_move_insn (gen_rtx_REG (word_mode, STATUS_REGNUM),
+                       gen_rtx_REG (SImode, GPR_0));
+      frame_move_insn (gen_rtx_REG (word_mode, IRET_REGNUM),
+                       gen_rtx_REG (SImode, GPR_0+1));
+      addr = plus_constant (stack_pointer_rtx,
+                            - (HOST_WIDE_INT) 2 * UNITS_PER_WORD);
+      frame_move_insn (gen_rtx_REG (DImode, GPR_0),
+                       gen_frame_mem (DImode, addr));
+    }
+  /* Restore the registers saved in the small (below-threshold) area.  */
+  addr = plus_constant (stack_pointer_rtx,
+                        epiphany_stack_offset - (HOST_WIDE_INT) UNITS_PER_WORD);
+  epiphany_emit_save_restore (0, current_frame_info.small_threshold, addr, 1);
+  if (!sibcall_p)
+    {
+      if (interrupt_p)
+        emit_jump_insn (gen_return_internal_interrupt());
+      else
+        emit_jump_insn (gen_return_i ());
+    }
+}
+
+/* Implement INITIAL_ELIMINATION_OFFSET: return the offset to add when
+   replacing register FROM (frame or arg pointer) by register TO (stack
+   or hard frame pointer).  Any other pairing is a bug.  */
+int
+epiphany_initial_elimination_offset (int from, int to)
+{
+  epiphany_compute_frame_size (get_frame_size ());
+  if (to == STACK_POINTER_REGNUM)
+    {
+      if (from == FRAME_POINTER_REGNUM)
+        return current_frame_info.total_size - current_frame_info.reg_size;
+      if (from == ARG_POINTER_REGNUM)
+        return (current_frame_info.total_size
+                - ((current_frame_info.pretend_size + 4) & -8));
+    }
+  else if (to == HARD_FRAME_POINTER_REGNUM)
+    {
+      if (from == FRAME_POINTER_REGNUM)
+        return (current_frame_info.first_slot_offset
+                - current_frame_info.reg_size);
+      if (from == ARG_POINTER_REGNUM)
+        return (current_frame_info.first_slot_offset
+                - ((current_frame_info.pretend_size + 4) & -8));
+    }
+  gcc_unreachable ();
+}
+
+/* Scheduler hook: the number of instructions that may be issued in the
+   same cycle; allow up to two.  */
+static int
+epiphany_issue_rate (void)
+{
+  return 2;
+}
+
+/* Function to update the integer COST
+   based on the relationship between INSN that is dependent on
+   DEP_INSN through the dependence LINK.  The default is to make no
+   adjustment to COST.  This can be used for example to specify to
+   the scheduler that an output- or anti-dependence does not incur
+   the same cost as a data-dependence.  The return value should be
+   the new value for COST.  */
+static int
+epiphany_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+{
+  rtx dep_set, set;
+
+  /* Only plain data dependencies get the special treatment.  */
+  if (REG_NOTE_KIND (link) != 0)
+    return cost;
+  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
+    return cost;
+
+  /* The latency that we specify in the scheduling description refers
+     to the actual output, not to an auto-increment register; for that,
+     the latency is one.  */
+  dep_set = single_set (dep_insn);
+  if (!dep_set || !MEM_P (SET_SRC (dep_set)) || cost <= 1)
+    return cost;
+  set = single_set (insn);
+  if (set
+      && !reg_mentioned_p (SET_DEST (dep_set), SET_SRC (set))
+      && (!MEM_P (SET_DEST (set))
+          || !reg_mentioned_p (SET_DEST (dep_set),
+                               XEXP (SET_DEST (set), 0))))
+    return 1;
+  return cost;
+}
+
+/* Address-legitimization helper macros for epiphany_legitimate_address_p
+   below.  They rely on REG_OK_FOR_BASE_P, which that function #defines
+   locally so that its meaning depends on its STRICT argument.  */
+
+#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_BASE_P (X)
+
+/* True if X is a register acceptable as a base register.  */
+#define RTX_OK_FOR_BASE_P(X) \
+  (REG_P (X) && REG_OK_FOR_BASE_P (X))
+
+/* True if X is a register usable as an index for MODE; integer vector
+   modes only allow a register index when the guaranteed alignment
+   (epiphany_vect_align) covers the whole access.  */
+#define RTX_OK_FOR_INDEX_P(MODE, X) \
+  ((GET_MODE_CLASS (MODE) != MODE_VECTOR_INT \
+    || epiphany_vect_align >= GET_MODE_SIZE (MODE)) \
+   && (REG_P (X) && REG_OK_FOR_INDEX_P (X)))
+
+/* True if X is (plus base index-or-offset) valid for MODE.  */
+#define LEGITIMATE_OFFSET_ADDRESS_P(MODE, X) \
+(GET_CODE (X) == PLUS \
+ && RTX_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && (RTX_OK_FOR_INDEX_P (MODE, XEXP (X, 1)) \
+     || RTX_OK_FOR_OFFSET_P (MODE, XEXP (X, 1))))
+
+/* Implement TARGET_LEGITIMATE_ADDRESS_P.  Note that REG_OK_FOR_BASE_P,
+   referenced by the RTX_OK_FOR_* macros above, is defined here so that
+   it can test STRICT: under strict checking only hard GPRs qualify,
+   otherwise the arg pointer and pseudos do as well.  */
+static bool
+epiphany_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
+{
+#define REG_OK_FOR_BASE_P(X) \
+  (strict ? GPR_P (REGNO (X)) : GPR_AP_OR_PSEUDO_P (REGNO (X)))
+  if (RTX_OK_FOR_BASE_P (x))
+    return true;
+  if (RTX_FRAME_OFFSET_P (x))
+    return true;
+  if (LEGITIMATE_OFFSET_ADDRESS_P (mode, x))
+    return true;
+  /* Post-increment / post-decrement addressing, when enabled.  */
+  if (TARGET_POST_INC
+      && (GET_CODE (x) == POST_DEC || GET_CODE (x) == POST_INC)
+      && RTX_OK_FOR_BASE_P (XEXP ((x), 0)))
+    return true;
+  /* NOTE(review): POST_MODIFY is also accepted after reload even without
+     -mpost-modify -- presumably later passes generate it; confirm.  */
+  if ((TARGET_POST_MODIFY || reload_completed)
+      && GET_CODE (x) == POST_MODIFY
+      && GET_CODE (XEXP ((x), 1)) == PLUS
+      && rtx_equal_p (XEXP ((x), 0), XEXP (XEXP ((x), 1), 0))
+      && LEGITIMATE_OFFSET_ADDRESS_P (mode, XEXP ((x), 1)))
+    return true;
+  /* Accept anything in BLKmode (used e.g. by the stack-adjust frame
+     memory references emitted in the prologue/epilogue).  */
+  if (mode == BLKmode)
+    return true;
+  return false;
+}
+
+/* Implement TARGET_SECONDARY_RELOAD.  Currently never requests a
+   secondary reload: the UNSPEC branch below is intentionally compiled
+   out (see the comment) and both paths return NO_REGS.  */
+static reg_class_t
+epiphany_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
+                           enum machine_mode mode ATTRIBUTE_UNUSED,
+                           secondary_reload_info *sri)
+{
+  /* This could give more reload inheritance, but we are missing some
+     reload infrastructure.  */
+ if (0)
+  if (in_p && GET_CODE (x) == UNSPEC
+      && satisfies_constraint_Sra (x) && !satisfies_constraint_Rra (x))
+    {
+      gcc_assert (rclass == GENERAL_REGS);
+      sri->icode = CODE_FOR_reload_insi_ra;
+      return NO_REGS;
+    }
+  return NO_REGS;
+}
+
+/* Return true if a call to the function that symbol X refers to must be
+   expanded as a long call.  */
+bool
+epiphany_is_long_call_p (rtx x)
+{
+  tree decl = SYMBOL_REF_DECL (x);
+
+  /* ??? Is it safe to default to !TARGET_SHORT_CALLS if DECL is NULL?  We
+     should probably encode information via encode_section_info, and also
+     have (an) option(s) to take SYMBOL_FLAG_LOCAL and/or
+     SYMBOL_FLAG_EXTERNAL into account.  */
+  if (decl)
+    {
+      tree attrs = TYPE_ATTRIBUTES (TREE_TYPE (decl));
+
+      /* An explicit attribute overrides the -mshort-calls default.  */
+      if (lookup_attribute ("long_call", attrs))
+        return true;
+      if (lookup_attribute ("short_call", attrs))
+        return false;
+    }
+  return !TARGET_SHORT_CALLS;
+}
+
+/* Return true if operand X may be addressed with the small16 addressing
+   forms.  A function symbol that requires a long call is excluded.
+   (The original version kept the constant offset of a (const (plus ...))
+   in a write-only local; that dead variable is removed here -- only the
+   symbol matters for the long-call check.)  */
+bool
+epiphany_small16 (rtx x)
+{
+  rtx base = x;
+
+  /* Look through (const (plus sym off)) to the underlying symbol.  */
+  if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
+    base = XEXP (XEXP (x, 0), 0);
+  if (GET_CODE (base) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (base)
+      && epiphany_is_long_call_p (base))
+    return false;
+  return TARGET_SMALL16 != 0;
+}
+
+/* Return nonzero if it is ok to make a tail-call to DECL (or, for an
+   indirect call, to the function type reachable through call
+   expression EXP).  */
+static bool
+epiphany_function_ok_for_sibcall (tree decl, tree exp)
+{
+  tree fn_type;
+
+  /* Don't tailcall from or to an ISR routine - although we could in
+     principle tailcall from one ISR routine to another, we'd need to
+     handle this in sibcall_epilogue to make it work.  */
+  if (EPIPHANY_INTERRUPT_P (epiphany_compute_function_type
+                            (current_function_decl)))
+    return false;
+
+  if (decl)
+    return !EPIPHANY_INTERRUPT_P (epiphany_compute_function_type (decl));
+
+  /* Indirect call: inspect the attributes of the pointed-to
+     function/method type instead of a decl.  */
+  fn_type = TREE_TYPE (CALL_EXPR_FN (exp));
+  gcc_assert (POINTER_TYPE_P (fn_type));
+  fn_type = TREE_TYPE (fn_type);
+  gcc_assert (TREE_CODE (fn_type) == FUNCTION_TYPE
+              || TREE_CODE (fn_type) == METHOD_TYPE);
+  return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (fn_type)) == NULL;
+}
+
+/* T is a function declaration or the MEM_EXPR of a MEM passed to a call
+   expander.
+   Return true iff the type of T has the uninterruptible ("disinterrupt")
+   attribute.  If T is NULL, return false.  */
+bool
+epiphany_uninterruptible_p (tree t)
+{
+  if (!t)
+    return false;
+  return (lookup_attribute ("disinterrupt", TYPE_ATTRIBUTES (TREE_TYPE (t)))
+          != NULL);
+}
+
+/* Like epiphany_uninterruptible_p, for a call through MEM: use the callee
+   decl when the call address is a SYMBOL_REF with a decl, otherwise fall
+   back to the MEM_EXPR of the call address.  */
+bool
+epiphany_call_uninterruptible_p (rtx mem)
+{
+  rtx addr = XEXP (mem, 0);
+  tree t = NULL_TREE;
+
+  if (GET_CODE (addr) == SYMBOL_REF)
+    t = SYMBOL_REF_DECL (addr);
+  return epiphany_uninterruptible_p (t ? t : MEM_EXPR (mem));
+}
+
+/* Implement TARGET_PROMOTE_FUNCTION_MODE by delegating to promote_mode.
+   NOTE(review): the promoted signedness is discarded into DUMMY rather
+   than stored through PUNSIGNEDP -- confirm that this is intended.  */
+static enum machine_mode
+epiphany_promote_function_mode (const_tree type, enum machine_mode mode,
+                                int *punsignedp ATTRIBUTE_UNUSED,
+                                const_tree funtype ATTRIBUTE_UNUSED,
+                                int for_return ATTRIBUTE_UNUSED)
+{
+  int dummy;
+
+  return promote_mode (type, mode, &dummy);
+}
+
+/* Implement TARGET_CONDITIONAL_REGISTER_USAGE: adjust the fixed /
+   call-used register sets and the register class contents according to
+   the selected target options.  */
+static void
+epiphany_conditional_register_usage (void)
+{
+  int i;
+
+  /* The PIC register, if any, is not available for general use.  */
+  if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
+    {
+      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
+      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
+    }
+  /* With -mhalf-reg-file, registers r32..r63 are off limits.  */
+  if (TARGET_HALF_REG_FILE)
+    {
+      for (i = 32; i <= 63; i++)
+        {
+          fixed_regs[i] = 1;
+          call_used_regs[i] = 1;
+        }
+    }
+  /* A register reserved to permanently hold -1 (-m1reg-r43/-m1reg-r63,
+     cf. the crtm1reg startup files in STARTFILE_SPEC).  */
+  if (epiphany_m1reg >= 0)
+    {
+      fixed_regs[epiphany_m1reg] = 1;
+      call_used_regs[epiphany_m1reg] = 1;
+    }
+  if (!TARGET_PREFER_SHORT_INSN_REGS)
+    CLEAR_HARD_REG_SET (reg_class_contents[SHORT_INSN_REGS]);
+  /* SIBCALL_REGS is GENERAL_REGS minus the call-saved registers.  */
+  COPY_HARD_REG_SET (reg_class_contents[SIBCALL_REGS],
+                     reg_class_contents[GENERAL_REGS]);
+  /* It would be simpler and quicker if we could just use
+     AND_COMPL_HARD_REG_SET, alas, call_used_reg_set is yet uninitialized;
+     it is set up later by our caller.  */
+  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+    if (!call_used_regs[i])
+      CLEAR_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], i);
+}
+
+/* Determine where to put an argument to a function.
+   Value is zero to push the argument on the stack,
+   or a hard register in which to store the argument.
+
+   MODE is the argument's machine mode.
+   TYPE is the data type of the argument (as a tree).
+    This is null for libcalls where that information may
+    not be available.
+   CUM is a variable of type CUMULATIVE_ARGS which gives info about
+    the preceding args and about the function being called.
+   NAMED is nonzero if this argument is a named parameter
+    (otherwise it is an extra parameter matching an ellipsis).  */
+/* On the EPIPHANY the first MAX_EPIPHANY_PARM_REGS args are normally in
+   registers and the rest are pushed.  PASS_IN_REG_P and
+   ROUND_ADVANCE_CUM come from epiphany.h.  */
+static rtx
+epiphany_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
+                       const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+  CUMULATIVE_ARGS cum = *get_cumulative_args (cum_v);
+
+  if (PASS_IN_REG_P (cum, mode, type))
+    return gen_rtx_REG (mode, ROUND_ADVANCE_CUM (cum, mode, type));
+  /* Argument is passed on the stack.  */
+  return 0;
+}
+
+/* Update the data in CUM to advance over an argument
+   of mode MODE and data type TYPE.
+   (TYPE is null for libcalls where that information may not be available.)
+   ROUND_ADVANCE_CUM aligns CUM for this argument and ROUND_ADVANCE_ARG
+   yields its size in slots (both macros defined in epiphany.h --
+   NOTE(review): confirm semantics there).  */
+static void
+epiphany_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
+                               const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+
+  *cum = ROUND_ADVANCE_CUM (*cum, mode, type) + ROUND_ADVANCE_ARG (mode, type);
+}
+
+/* Nested function support.
+   An epiphany trampoline looks like this:
+   mov r16,%low(fnaddr)
+   movt r16,%high(fnaddr)
+   mov ip,%low(cxt)
+   movt ip,%high(cxt)
+   jr r16  */
+
+/* Scatter the low 16 bits of X into a mov-style immediate field:
+   bits 0..7 of X go to bits 5..12, bits 8..15 to bits 20..27.  */
+#define EPIPHANY_LOW_RTX(X) \
+  (gen_rtx_IOR (SImode, \
+    gen_rtx_ASHIFT (SImode, \
+                    gen_rtx_AND (SImode, (X), GEN_INT (0xff)), GEN_INT (5)), \
+    gen_rtx_ASHIFT (SImode, \
+                    gen_rtx_AND (SImode, (X), GEN_INT (0xff00)), GEN_INT (12))))
+/* Likewise for the high 16 bits of X (used with movt).  */
+#define EPIPHANY_HIGH_RTX(X) \
+  EPIPHANY_LOW_RTX (gen_rtx_LSHIFTRT (SImode, (X), GEN_INT (16)))
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+   TRAMP_MEM is the memory reserved for the trampoline, FNDECL the nested
+   function (its code address is taken from DECL_RTL), and CXT is an RTX
+   for the static chain value for the function.  The hex constants are
+   the fixed instruction bits of the five-insn sequence shown above --
+   NOTE(review): opcode encodings taken on trust from the ISA.  */
+static void
+epiphany_trampoline_init (rtx tramp_mem, tree fndecl, rtx cxt)
+{
+  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
+  rtx tramp = force_reg (Pmode, XEXP (tramp_mem, 0));
+
+  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
+                  gen_rtx_IOR (SImode, GEN_INT (0x4002000b),
+                               EPIPHANY_LOW_RTX (fnaddr)));
+  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
+                  gen_rtx_IOR (SImode, GEN_INT (0x5002000b),
+                               EPIPHANY_HIGH_RTX (fnaddr)));
+  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
+                  gen_rtx_IOR (SImode, GEN_INT (0x2002800b),
+                               EPIPHANY_LOW_RTX (cxt)));
+  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
+                  gen_rtx_IOR (SImode, GEN_INT (0x3002800b),
+                               EPIPHANY_HIGH_RTX (cxt)));
+  /* The final word is the fixed "jr r16" instruction.  */
+  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 16)),
+                  GEN_INT (0x0802014f));
+}
+
+/* Return true if mode switching should be performed for ENTITY in the
+   current function.  Entities whose bit is already set in
+   sw_entities_processed have been handled and are skipped.  */
+bool
+epiphany_optimize_mode_switching (int entity)
+{
+  if (MACHINE_FUNCTION (cfun)->sw_entities_processed & (1 << entity))
+    return false;
+  switch (entity)
+    {
+    case EPIPHANY_MSW_ENTITY_AND:
+    case EPIPHANY_MSW_ENTITY_OR:
+      return true;
+    case EPIPHANY_MSW_ENTITY_NEAREST:
+    case EPIPHANY_MSW_ENTITY_TRUNC:
+      return optimize > 0;
+    case EPIPHANY_MSW_ENTITY_ROUND_UNKNOWN:
+      /* Only worthwhile if epiphany_mode_needed found any use of the
+         statically unknown rounding mode.  */
+      return MACHINE_FUNCTION (cfun)->unknown_mode_uses != 0;
+    case EPIPHANY_MSW_ENTITY_ROUND_KNOWN:
+      return (MACHINE_FUNCTION (cfun)->sw_entities_processed
+              & (1 << EPIPHANY_MSW_ENTITY_ROUND_UNKNOWN)) != 0;
+    case EPIPHANY_MSW_ENTITY_FPU_OMNIBUS:
+      /* At -O0 this is the only fp-mode entity; otherwise it is handled
+         when running inside the mode_switch_use pass.  */
+      return optimize == 0 || current_pass == &pass_mode_switch_use.pass;
+    }
+  gcc_unreachable ();
+}
+
+/* Map mode-switching PRIORITY (0 = most preferred) to the fp mode it
+   selects for ENTITY.  For the AND/OR mask entities the priority is the
+   mode itself.  The preference order of the rounding modes depends on
+   the -mfp-mode default (epiphany_normal_fp_mode).  */
+int
+epiphany_mode_priority_to_mode (int entity, unsigned priority)
+{
+  if (entity == EPIPHANY_MSW_ENTITY_AND || entity == EPIPHANY_MSW_ENTITY_OR)
+    return priority;
+  if (priority > 3)
+    switch (priority)
+      {
+      case 4: return FP_MODE_ROUND_UNKNOWN;
+      case 5: return FP_MODE_NONE;
+      default: gcc_unreachable ();
+      }
+  /* PRIORITY is 0..3 here; each inner switch below returns for all four
+     values, so control never falls through to the next outer case.  */
+  switch ((enum attr_fp_mode) epiphany_normal_fp_mode)
+    {
+    case FP_MODE_INT:
+      switch (priority)
+        {
+        case 0: return FP_MODE_INT;
+        case 1: return epiphany_normal_fp_rounding;
+        case 2: return (epiphany_normal_fp_rounding == FP_MODE_ROUND_NEAREST
+                        ? FP_MODE_ROUND_TRUNC : FP_MODE_ROUND_NEAREST);
+        case 3: return FP_MODE_CALLER;
+        }
+    case FP_MODE_ROUND_NEAREST:
+    case FP_MODE_CALLER:
+      switch (priority)
+        {
+        case 0: return FP_MODE_ROUND_NEAREST;
+        case 1: return FP_MODE_ROUND_TRUNC;
+        case 2: return FP_MODE_INT;
+        case 3: return FP_MODE_CALLER;
+        }
+    case FP_MODE_ROUND_TRUNC:
+      switch (priority)
+        {
+        case 0: return FP_MODE_ROUND_TRUNC;
+        case 1: return FP_MODE_ROUND_NEAREST;
+        case 2: return FP_MODE_INT;
+        case 3: return FP_MODE_CALLER;
+        }
+    case FP_MODE_ROUND_UNKNOWN:
+    case FP_MODE_NONE:
+      gcc_unreachable ();
+    }
+  gcc_unreachable ();
+}
+
+/* MODE_NEEDED: return the mode that ENTITY requires INSN to execute in.
+   For the AND/OR mask entities the values 1 / 2 distinguish fp-mode
+   insns from integer-mode ones (NOTE(review): the exact mask semantics
+   are defined by emit_set_fp_mode; confirm there).  Unrecognizable insns
+   impose no fp-mode requirement but conservatively need mask mode 2.  */
+int
+epiphany_mode_needed (int entity, rtx insn)
+{
+  enum attr_fp_mode mode;
+
+  if (recog_memoized (insn) < 0)
+    {
+      if (entity == EPIPHANY_MSW_ENTITY_AND
+          || entity == EPIPHANY_MSW_ENTITY_OR)
+        return 2;
+      return FP_MODE_NONE;
+    }
+  mode = get_attr_fp_mode (insn);
+
+  switch (entity)
+  {
+  case EPIPHANY_MSW_ENTITY_AND:
+    return mode != FP_MODE_INT ? 1 : 2;
+  case EPIPHANY_MSW_ENTITY_OR:
+    return mode == FP_MODE_INT ? 1 : 2;
+  case EPIPHANY_MSW_ENTITY_ROUND_KNOWN:
+    /* A set_fp_mode insn is considered to need the mode it switches
+       to, as computed by epiphany_mode_after.  */
+    if (recog_memoized (insn) == CODE_FOR_set_fp_mode)
+      mode = (enum attr_fp_mode) epiphany_mode_after (entity, mode, insn);
+    /* Fall through.  */
+  case EPIPHANY_MSW_ENTITY_NEAREST:
+  case EPIPHANY_MSW_ENTITY_TRUNC:
+    if (mode == FP_MODE_ROUND_UNKNOWN)
+      {
+        /* Count the use for epiphany_optimize_mode_switching; the
+           ROUND_UNKNOWN entity will deal with it.  */
+        MACHINE_FUNCTION (cfun)->unknown_mode_uses++;
+        return FP_MODE_NONE;
+      }
+    return mode;
+  case EPIPHANY_MSW_ENTITY_ROUND_UNKNOWN:
+    if (mode == FP_MODE_ROUND_NEAREST || mode == FP_MODE_ROUND_TRUNC)
+      return FP_MODE_ROUND_UNKNOWN;
+    return mode;
+  case EPIPHANY_MSW_ENTITY_FPU_OMNIBUS:
+    if (mode == FP_MODE_ROUND_UNKNOWN)
+      return epiphany_normal_fp_rounding;
+    return mode;
+  default:
+    gcc_unreachable ();
+  }
+}
+
+/* MODE_ENTRY / MODE_EXIT: return the mode in force at function entry
+   (EXIT false) or required at function exit (EXIT true) for ENTITY.
+   Interrupt functions must preserve the interrupted context's mode,
+   hence FP_MODE_CALLER.  Also records ENTITY as processed, which
+   epiphany_optimize_mode_switching consults.  */
+int
+epiphany_mode_entry_exit (int entity, bool exit)
+{
+  int normal_mode = epiphany_normal_fp_mode ;
+
+  MACHINE_FUNCTION (cfun)->sw_entities_processed |= (1 << entity);
+  if (epiphany_is_interrupt_p (current_function_decl))
+    normal_mode = FP_MODE_CALLER;
+  switch (entity)
+    {
+    case EPIPHANY_MSW_ENTITY_AND:
+      if (exit)
+        return normal_mode != FP_MODE_INT ? 1 : 2;
+      return 0;
+    case EPIPHANY_MSW_ENTITY_OR:
+      if (exit)
+        return normal_mode == FP_MODE_INT ? 1 : 2;
+      return 0;
+    case EPIPHANY_MSW_ENTITY_ROUND_UNKNOWN:
+      /* A known rounding mode maps to ROUND_UNKNOWN for this entity,
+         matching epiphany_mode_needed.  */
+      if (normal_mode == FP_MODE_ROUND_NEAREST
+          || normal_mode == FP_MODE_ROUND_TRUNC)
+      return FP_MODE_ROUND_UNKNOWN;
+      /* Fall through.  */
+    case EPIPHANY_MSW_ENTITY_NEAREST:
+    case EPIPHANY_MSW_ENTITY_TRUNC:
+    case EPIPHANY_MSW_ENTITY_ROUND_KNOWN:
+    case EPIPHANY_MSW_ENTITY_FPU_OMNIBUS:
+      return normal_mode;
+    default:
+      gcc_unreachable ();
+    }
+}
+
+/* MODE_AFTER: return the mode of ENTITY that is live after INSN, given
+   that LAST_MODE was live before it.  */
+int
+epiphany_mode_after (int entity, int last_mode, rtx insn)
+{
+  /* We have too few call-saved registers to hope to keep the masks across
+     calls.  */
+  if (entity == EPIPHANY_MSW_ENTITY_AND || entity == EPIPHANY_MSW_ENTITY_OR)
+    {
+      if (GET_CODE (insn) == CALL_INSN)
+        return 0;
+      return last_mode;
+    }
+  if (recog_memoized (insn) < 0)
+    return last_mode;
+  /* An insn with a statically unknown rounding mode resolves the mode for
+     the single-rounding-mode entities when it wasn't pinned down yet.  */
+  if (get_attr_fp_mode (insn) == FP_MODE_ROUND_UNKNOWN
+      && last_mode != FP_MODE_ROUND_NEAREST && last_mode != FP_MODE_ROUND_TRUNC)
+    {
+      if (entity == EPIPHANY_MSW_ENTITY_NEAREST)
+        return FP_MODE_ROUND_NEAREST;
+      if (entity == EPIPHANY_MSW_ENTITY_TRUNC)
+        return FP_MODE_ROUND_TRUNC;
+    }
+  if (recog_memoized (insn) == CODE_FOR_set_fp_mode)
+    {
+      rtx src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
+      int fp_mode;
+
+      if (REG_P (src))
+        return FP_MODE_CALLER;
+      fp_mode = INTVAL (XVECEXP (XEXP (src, 0), 0, 0));
+      /* Compare against fp-mode values here; the original code tested
+         fp_mode == EPIPHANY_MSW_ENTITY_TRUNC, mixing up the entity enum
+         with the fp-mode enum (compare the ROUND_UNKNOWN handling in
+         epiphany_mode_needed, which tests FP_MODE_ROUND_TRUNC).  */
+      if (entity == EPIPHANY_MSW_ENTITY_ROUND_UNKNOWN
+          && (fp_mode == FP_MODE_ROUND_NEAREST
+              || fp_mode == FP_MODE_ROUND_TRUNC))
+        return FP_MODE_ROUND_UNKNOWN;
+      return fp_mode;
+    }
+  return last_mode;
+}
+
+/* Emit the insns that switch ENTITY to MODE.  REGS_LIVE is unused.
+   The AND/OR mask entities only (re)load their mask pseudo registers;
+   the fp-mode entities emit a set_fp_mode insn that merges the desired
+   mode into the CONFIG register under the appropriate mask.  */
+void
+emit_set_fp_mode (int entity, int mode, HARD_REG_SET regs_live ATTRIBUTE_UNUSED)
+{
+  rtx save_cc, cc_reg, mask, src, src2;
+  enum attr_fp_mode fp_mode;
+
+  /* Lazily create the pseudos that hold the AND and OR masks.  */
+  if (!MACHINE_FUNCTION (cfun)->and_mask)
+    {
+      MACHINE_FUNCTION (cfun)->and_mask = gen_reg_rtx (SImode);
+      MACHINE_FUNCTION (cfun)->or_mask = gen_reg_rtx (SImode);
+    }
+  if (entity == EPIPHANY_MSW_ENTITY_AND)
+    {
+      gcc_assert (mode >= 0 && mode <= 2);
+      /* NOTE(review): 0xfff1fffe is the CONFIG AND-mask for mask mode 1;
+         the exact CONFIG bit layout is taken on trust here.  */
+      if (mode == 1)
+        emit_move_insn (MACHINE_FUNCTION (cfun)->and_mask,
+                        gen_int_mode (0xfff1fffe, SImode));
+      return;
+    }
+  else if (entity == EPIPHANY_MSW_ENTITY_OR)
+    {
+      gcc_assert (mode >= 0 && mode <= 2);
+      if (mode == 1)
+        emit_move_insn (MACHINE_FUNCTION (cfun)->or_mask, GEN_INT(0x00080000));
+      return;
+    }
+  fp_mode = (enum attr_fp_mode) mode;
+  src = NULL_RTX;
+
+  /* Choose the mask, and for FP_MODE_CALLER the source (the CONFIG value
+     captured at function entry).  */
+  switch (fp_mode)
+    {
+    case FP_MODE_CALLER:
+      src = get_hard_reg_initial_val (SImode, CONFIG_REGNUM);
+      mask = MACHINE_FUNCTION (cfun)->and_mask;
+      break;
+    case FP_MODE_ROUND_UNKNOWN:
+      MACHINE_FUNCTION (cfun)->unknown_mode_sets++;
+      mask = MACHINE_FUNCTION (cfun)->and_mask;
+      break;
+    case FP_MODE_ROUND_NEAREST:
+      if (entity == EPIPHANY_MSW_ENTITY_TRUNC)
+        return;
+      mask = MACHINE_FUNCTION (cfun)->and_mask;
+      break;
+    case FP_MODE_ROUND_TRUNC:
+      if (entity == EPIPHANY_MSW_ENTITY_NEAREST)
+        return;
+      mask = MACHINE_FUNCTION (cfun)->and_mask;
+      break;
+    case FP_MODE_INT:
+      mask = MACHINE_FUNCTION (cfun)->or_mask;
+      break;
+    case FP_MODE_NONE:
+    default:
+      gcc_unreachable ();
+    }
+  /* Preserve the condition codes across the mode-switch sequence.  */
+  save_cc = gen_reg_rtx (CCmode);
+  cc_reg = gen_rtx_REG (CCmode, CC_REGNUM);
+  emit_move_insn (save_cc, cc_reg);
+  mask = force_reg (SImode, mask);
+  /* Encode the target mode as (const (unspec [mode] UNSPEC_FP_MODE)),
+     unless FP_MODE_CALLER already supplied a register source.  */
+  if (!src)
+    {
+      rtvec v = gen_rtvec (1, GEN_INT (fp_mode));
+
+      src = gen_rtx_CONST (SImode, gen_rtx_UNSPEC (SImode, v, UNSPEC_FP_MODE));
+    }
+  /* SRC2 describes the mode as seen by the rounding-mode entities; for
+     the single-rounding entities the resulting mode is ROUND_UNKNOWN.  */
+  if (entity == EPIPHANY_MSW_ENTITY_ROUND_KNOWN
+      || entity == EPIPHANY_MSW_ENTITY_FPU_OMNIBUS)
+    src2 = copy_rtx (src);
+  else
+    {
+      rtvec v = gen_rtvec (1, GEN_INT (FP_MODE_ROUND_UNKNOWN));
+
+      src2 = gen_rtx_CONST (SImode, gen_rtx_UNSPEC (SImode, v, UNSPEC_FP_MODE));
+    }
+  emit_insn (gen_set_fp_mode (src, src2, mask));
+  emit_move_insn (cc_reg, save_cc);
+}
+
+/* Expand the set_fp_mode insn.  OPERANDS[0]/[1] hold the requested mode
+   (a (const (unspec ...)), or a register with the caller's CONFIG value
+   for FP_MODE_CALLER), OPERANDS[2] the mask register, OPERANDS[3] a
+   scratch register.  Interrupts are disabled (gid) around the CONFIG
+   update unless the whole function is already uninterruptible, and
+   re-enabled (gie) afterwards.  */
+void
+epiphany_expand_set_fp_mode (rtx *operands)
+{
+  rtx ctrl = gen_rtx_REG (SImode, CONFIG_REGNUM);
+  rtx src = operands[0];
+  rtx mask_reg = operands[2];
+  rtx scratch = operands[3];
+  enum attr_fp_mode fp_mode;
+
+
+  gcc_assert (rtx_equal_p (src, operands[1])
+              /* Sometimes reload gets silly and reloads the same pseudo
+                 into different registers.  */
+              || (REG_P (src) && REG_P (operands[1])));
+
+  if (!epiphany_uninterruptible_p (current_function_decl))
+    emit_insn (gen_gid ());
+  emit_move_insn (scratch, ctrl);
+
+  if (GET_CODE (src) == REG)
+    {
+      /* FP_MODE_CALLER: merge SRC into CONFIG under MASK_REG.
+         ((ctrl ^ src) & mask) ^ src keeps CONFIG's bits where the mask
+         is 1 and takes SRC's bits where it is 0.  */
+      emit_insn (gen_xorsi3 (scratch, scratch, src));
+      emit_insn (gen_andsi3 (scratch, scratch, mask_reg));
+      emit_insn (gen_xorsi3 (scratch, scratch, src));
+    }
+  else
+    {
+      gcc_assert (GET_CODE (src) == CONST);
+      src = XEXP (src, 0);
+      fp_mode = (enum attr_fp_mode) INTVAL (XVECEXP (src, 0, 0));
+      switch (fp_mode)
+        {
+        case FP_MODE_ROUND_NEAREST:
+          emit_insn (gen_andsi3 (scratch, scratch, mask_reg));
+          break;
+        case FP_MODE_ROUND_TRUNC:
+          /* NOTE(review): assumes the AND mask clears a field whose low
+             bit selects truncation; confirm against the CONFIG register
+             layout.  */
+          emit_insn (gen_andsi3 (scratch, scratch, mask_reg));
+          emit_insn (gen_add2_insn (scratch, const1_rtx));
+          break;
+        case FP_MODE_INT:
+          emit_insn (gen_iorsi3 (scratch, scratch, mask_reg));
+          break;
+        case FP_MODE_CALLER:
+        case FP_MODE_ROUND_UNKNOWN:
+        case FP_MODE_NONE:
+          gcc_unreachable ();
+        }
+    }
+  emit_move_insn (ctrl, scratch);
+  if (!epiphany_uninterruptible_p (current_function_decl))
+    emit_insn (gen_gie ());
+}
+
+/* Add USEs / CLOBBERs of the rounding-mode control registers to INSN's
+   PARALLEL so that FPU insns carry explicit dependencies on the
+   FP_NEAREST / FP_TRUNCATE control registers for the omnibus entity.  */
+void
+epiphany_insert_mode_switch_use (rtx insn,
+                                 int entity ATTRIBUTE_UNUSED,
+                                 int mode ATTRIBUTE_UNUSED)
+{
+  rtx pat = PATTERN (insn);
+  rtvec v;
+  int len, i;
+  rtx near = gen_rtx_REG (SImode, FP_NEAREST_REGNUM);
+  rtx trunc = gen_rtx_REG (SImode, FP_TRUNCATE_REGNUM);
+
+  if (entity != EPIPHANY_MSW_ENTITY_FPU_OMNIBUS)
+    return;
+  switch ((enum attr_fp_mode) get_attr_fp_mode (insn))
+    {
+    case FP_MODE_ROUND_NEAREST:
+      near = gen_rtx_USE (VOIDmode, near);
+      trunc = gen_rtx_CLOBBER (VOIDmode, trunc);
+      break;
+    case FP_MODE_ROUND_TRUNC:
+      near = gen_rtx_CLOBBER (VOIDmode, near);
+      trunc = gen_rtx_USE (VOIDmode, trunc);
+      break;
+    case FP_MODE_ROUND_UNKNOWN:
+      /* Both slots reference the ANYFP register; set the bare REGs here
+         and let the fall-through wrap them in USEs.  The original code
+         wrapped NEAR in a USE before falling through, so the shared code
+         produced a malformed (use (use (reg))).  */
+      near = gen_rtx_REG (SImode, FP_ANYFP_REGNUM);
+      trunc = copy_rtx (near);
+      /* Fall through.  */
+    case FP_MODE_INT:
+    case FP_MODE_CALLER:
+      near = gen_rtx_USE (VOIDmode, near);
+      trunc = gen_rtx_USE (VOIDmode, trunc);
+      break;
+    case FP_MODE_NONE:
+      gcc_unreachable ();
+    }
+  /* Append the two new elements to the insn's PARALLEL.  */
+  gcc_assert (GET_CODE (pat) == PARALLEL);
+  len = XVECLEN (pat, 0);
+  v = rtvec_alloc (len + 2);
+  for (i = 0; i < len; i++)
+    RTVEC_ELT (v, i) = XVECEXP (pat, 0, i);
+  RTVEC_ELT (v, len) = near;
+  RTVEC_ELT (v, len + 1) = trunc;
+  pat = gen_rtx_PARALLEL (VOIDmode, v);
+  PATTERN (insn) = pat;
+  MACHINE_FUNCTION (cfun)->control_use_inserted = true;
+}
+
+/* Implement EPILOGUE_USES: report which hard registers are considered
+   live at function exit, so the epilogue is not allowed to clobber
+   them.  The link register always is; interrupt functions additionally
+   keep every non-fixed register (plus the STATUS/IRET and rounding-mode
+   control registers) live; otherwise the rounding-mode control register
+   that the default fp mode may rely on stays live.  */
+bool
+epiphany_epilogue_uses (int regno)
+{
+  if (regno == GPR_LR)
+    return true;
+  if (reload_completed && epiphany_is_interrupt_p (current_function_decl))
+    return (!fixed_regs[regno]
+            || regno == STATUS_REGNUM || regno == IRET_REGNUM
+            || regno == FP_NEAREST_REGNUM || regno == FP_TRUNCATE_REGNUM);
+  if (regno == FP_NEAREST_REGNUM)
+    return epiphany_normal_fp_mode != FP_MODE_ROUND_TRUNC;
+  if (regno == FP_TRUNCATE_REGNUM)
+    return epiphany_normal_fp_mode != FP_MODE_ROUND_NEAREST;
+  return false;
+}
+
+/* Implement TARGET_MIN_DIVISIONS_FOR_RECIP_MUL: with -freciprocal-math,
+   a single SFmode division already justifies the reciprocal transform.  */
+static unsigned int
+epiphany_min_divisions_for_recip_mul (enum machine_mode mode)
+{
+  if (flag_reciprocal_math && mode == SFmode)
+    /* We'll expand into a multiply-by-reciprocal anyway, so we might as
+       well do it already at the tree level and expose it to further
+       optimizations.  */
+    return 1;
+  return default_min_divisions_for_recip_mul (mode);
+}
+
+/* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE: vectorize in 64-bit
+   chunks with -mvect-double, otherwise in 32-bit chunks.  */
+static enum machine_mode
+epiphany_preferred_simd_mode (enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+  if (TARGET_VECT_DOUBLE)
+    return DImode;
+  return SImode;
+}
+
+/* Implement TARGET_VECTOR_MODE_SUPPORTED_P: V2SF and the 32- / 64-bit
+   integer vector modes are supported.  */
+static bool
+epiphany_vector_mode_supported_p (enum machine_mode mode)
+{
+  if (mode == V2SFmode)
+    return true;
+  return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
+          && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8));
+}
+
+/* Implement TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE.  */
+static bool
+epiphany_vector_alignment_reachable (const_tree type, bool is_packed)
+{
+  if (TYPE_ALIGN_UNIT (type) != 4)
+    return default_builtin_vector_alignment_reachable (type, is_packed);
+
+  /* Vectors which aren't in packed structures will not be less aligned than
+     the natural alignment of their element type, so this is safe.  */
+  return !is_packed;
+}
+
+/* Implement TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT: a 64-bit
+   vector access that is at least 32-bit aligned is always fine;
+   otherwise defer to the default.  */
+static bool
+epiphany_support_vector_misalignment (enum machine_mode mode, const_tree type,
+                                      int misalignment, bool is_packed)
+{
+  if (GET_MODE_SIZE (mode) != 8 || misalignment % 4 != 0)
+    return default_builtin_support_vector_misalignment (mode, type,
+                                                        misalignment,
+                                                        is_packed);
+  return true;
+}
+
+/* STRUCTURE_SIZE_BOUNDARY seems a bit crude in how it enlarges small
+   structs.  Make structs double-word-aligned if they are a double word or
+   (potentially) larger; failing that, do the same for a size of 32 bits.
+   COMPUTED and SPECIFIED are the alignments determined so far; the
+   return value is the final alignment for TYPE.  */
+unsigned
+epiphany_special_round_type_align (tree type, unsigned computed,
+                                   unsigned specified)
+{
+  unsigned align = MAX (computed, specified);
+  tree field;
+  HOST_WIDE_INT total, max;
+  /* The alignment we are trying to establish, capped by any active
+     maximum field alignment (#pragma pack / -fpack-struct).  */
+  unsigned try_align = FASTEST_ALIGNMENT;
+
+  if (maximum_field_alignment && try_align > maximum_field_alignment)
+    try_align = maximum_field_alignment;
+  if (align >= try_align)
+    return align;
+  /* Find the highest bit position any field reaches.  */
+  for (max = 0, field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
+    {
+      tree offset, size;
+
+      if (TREE_CODE (field) != FIELD_DECL
+          || TREE_TYPE (field) == error_mark_node)
+        continue;
+      offset = bit_position (field);
+      size = DECL_SIZE (field);
+      /* A field with non-constant position/size, or one that reaches the
+         try_align boundary, means the struct is big enough: use
+         try_align directly.  */
+      if (!host_integerp (offset, 1) || !host_integerp (size, 1)
+          || TREE_INT_CST_LOW (offset) >= try_align
+          || TREE_INT_CST_LOW (size) >= try_align)
+        return try_align;
+      total = TREE_INT_CST_LOW (offset) + TREE_INT_CST_LOW (size);
+      if (total > max)
+        max = total;
+    }
+  if (max >= (HOST_WIDE_INT) try_align)
+    align = try_align;
+  else if (try_align > 32 && max >= 32)
+    align = max > 32 ? 64 : 32;
+  return align;
+}
+
+/* Upping the alignment of arrays in structs is not only a performance
+   enhancement, it also helps preserve assumptions about how
+   arrays-at-the-end-of-structs work, like for struct gcov_fn_info in
+   libgcov.c .  */
+unsigned
+epiphany_adjust_field_align (tree field, unsigned computed)
+{
+  tree elmsz;
+
+  if (computed != 32 || TREE_CODE (TREE_TYPE (field)) != ARRAY_TYPE)
+    return computed;
+  /* Raise word-aligned arrays to double-word alignment, unless the
+     element size is known to be under 32 bits.  */
+  elmsz = TYPE_SIZE (TREE_TYPE (TREE_TYPE (field)));
+  if (!host_integerp (elmsz, 1) || tree_low_cst (elmsz, 1) >= 32)
+    return 64;
+  return computed;
+}
+
+/* Output code to add DELTA to the first argument, and then jump
+   to FUNCTION.  Used for C++ multiple inheritance.
+   The first argument lives in r1 rather than r0 when FUNCTION returns
+   its value in memory (presumably r0 then carries the return-slot
+   pointer -- NOTE(review): confirm against the ABI).  */
+static void
+epiphany_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
+                          HOST_WIDE_INT delta,
+                          HOST_WIDE_INT vcall_offset,
+                          tree function)
+{
+  int this_regno
+    = aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function) ? 1 : 0;
+  const char *this_name = reg_names[this_regno];
+  const char *fname;
+
+  /* We use IP and R16 as a scratch registers.  */
+  gcc_assert (call_used_regs [GPR_IP]);
+  gcc_assert (call_used_regs [GPR_16]);
+
+  /* Add DELTA.  When possible use a plain add, otherwise load it into
+     a register first.  */
+  if (delta == 0)
+    ; /* Done.  */
+  else if (SIMM11 (delta))
+    asm_fprintf (file, "\tadd\t%s,%s,%d\n", this_name, this_name, (int) delta);
+  else if (delta < 0 && delta >= -0xffff)
+    {
+      /* A small negative delta: subtract its magnitude, which fits the
+         16-bit mov immediate.  */
+      asm_fprintf (file, "\tmov\tip,%d\n", (int) -delta);
+      asm_fprintf (file, "\tsub\t%s,%s,ip\n", this_name, this_name);
+    }
+  else
+    {
+      /* General case: build DELTA in ip with mov/movt, then add.  */
+      asm_fprintf (file, "\tmov\tip,%%low(%ld)\n", (long) delta);
+      if (delta & ~0xffff)
+        asm_fprintf (file, "\tmovt\tip,%%high(%ld)\n", (long) delta);
+      asm_fprintf (file, "\tadd\t%s,%s,ip\n", this_name, this_name);
+    }
+
+  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
+  if (vcall_offset != 0)
+    {
+      /* ldr ip,[this]           --> temp = *this
+         ldr ip,[ip,vcall_offset] > temp = *(*this + vcall_offset)
+         add this,this,ip        --> this+ = *(*this + vcall_offset) */
+      asm_fprintf (file, "\tldr\tip, [%s]\n", this_name);
+      if (vcall_offset < -0x7ff * 4 || vcall_offset > 0x7ff * 4
+          || (vcall_offset & 3) != 0)
+        {
+          /* Offset out of range of the scaled 11-bit displacement:
+             materialize it in r16 and use register-index addressing.  */
+          asm_fprintf (file, "\tmov\tr16, %%low(%ld)\n", (long) vcall_offset);
+          asm_fprintf (file, "\tmovt\tr16, %%high(%ld)\n", (long) vcall_offset);
+          asm_fprintf (file, "\tldr\tip, [ip,r16]\n");
+        }
+      else
+        asm_fprintf (file, "\tldr\tip, [ip,%d]\n", (int) vcall_offset / 4);
+      asm_fprintf (file, "\tadd\t%s, %s, ip\n", this_name, this_name);
+    }
+
+  /* Finally, tail-jump to FUNCTION: through ip for a long call,
+     with a direct branch otherwise.  */
+  fname = XSTR (XEXP (DECL_RTL (function), 0), 0);
+  if (epiphany_is_long_call_p (XEXP (DECL_RTL (function), 0)))
+    {
+      fputs ("\tmov\tip,%low(", file);
+      assemble_name (file, fname);
+      fputs (")\n\tmovt\tip,%high(", file);
+      assemble_name (file, fname);
+      fputs (")\n\tjr ip\n", file);
+    }
+  else
+    {
+      fputs ("\tb\t", file);
+      assemble_name (file, fname);
+      fputc ('\n', file);
+    }
+}
+
+/* Define the target hook vector for the EPIPHANY back end.  */
+struct gcc_target targetm = TARGET_INITIALIZER;
diff --git a/gcc/config/epiphany/epiphany.h b/gcc/config/epiphany/epiphany.h
new file mode 100644
index 00000000000..9d03ee909b8
--- /dev/null
+++ b/gcc/config/epiphany/epiphany.h
@@ -0,0 +1,881 @@
+/* Definitions of target machine for GNU compiler, Argonaut EPIPHANY cpu.
+ Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2004, 2005,
+ 2007, 2009, 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_EPIPHANY_H
+#define GCC_EPIPHANY_H
+
+#undef LINK_SPEC
+#undef STARTFILE_SPEC
+#undef ENDFILE_SPEC
+#undef SIZE_TYPE
+#undef PTRDIFF_TYPE
+#undef WCHAR_TYPE
+#undef WCHAR_TYPE_SIZE
+
+/* Names to predefine in the preprocessor for this target machine. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__epiphany__"); \
+ builtin_define ("__little_endian__"); \
+ builtin_define_with_int_value ("__EPIPHANY_STACK_OFFSET__", \
+ epiphany_stack_offset); \
+ builtin_assert ("cpu=epiphany"); \
+ builtin_assert ("machine=epiphany"); \
+ } while (0)
+
+/* Pick up the libgloss library. One day we may do this by linker script, but
+ for now it's static. */
+#undef LIB_SPEC
+#define LIB_SPEC "%{!shared:%{g*:-lg} %{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}} -lepiphany"
+
+#define LINK_SPEC "%{v}"
+
+#define STARTFILE_SPEC "%{!shared:crt0.o%s} crti.o%s " \
+ "%{mfp-mode=int:crtint.o%s} %{mfp-mode=truncate:crtrunc.o%s} " \
+ "%{m1reg-r43:crtm1reg-r43.o%s} %{m1reg-r63:crtm1reg-r63.o%s} " \
+ "crtbegin.o%s"
+
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+ asm (SECTION_OP "\n\
+ mov r0,%low(" USER_LABEL_PREFIX #FUNC")\n\
+ movt r0,%high(" USER_LABEL_PREFIX #FUNC")\n\
+ jalr r0\n\
+ .text");
+
+#if 0 /* We would like to use Posix for profiling, but the simulator
+ interface still lacks mkdir. */
+#define TARGET_POSIX_IO
+#endif
+
+/* Target machine storage layout. */
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+#define WORDS_BIG_ENDIAN 0
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+/* It is far faster to zero extend chars than to sign extend them */
+
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = 1; \
+ (MODE) = SImode; \
+ }
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY 64
+
+/* ALIGN FRAMES on word boundaries */
+#define EPIPHANY_STACK_ALIGN(LOC) (((LOC)+7) & ~7)
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 32
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bit-field declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* No data type wants to be aligned rounder than this. */
+/* This is bigger than currently necessary for the EPIPHANY. If 8 byte floats are
+ ever added it's not clear whether they'll need such alignment or not. For
+ now we assume they will. We can always relax it if necessary but the
+ reverse isn't true. */
+#define BIGGEST_ALIGNMENT 64
+
+/* The best alignment to use in cases where we have a choice. */
+#define FASTEST_ALIGNMENT 64
+
+#define MALLOC_ABI_ALIGNMENT BIGGEST_ALIGNMENT
+
+/* Make strings dword-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < FASTEST_ALIGNMENT) \
+ ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Make arrays of chars dword-aligned for the same reasons.
+ Also, align arrays of SImode items. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
+ && (ALIGN) < FASTEST_ALIGNMENT \
+ ? FASTEST_ALIGNMENT \
+ : (TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == SImode \
+ && (ALIGN) < FASTEST_ALIGNMENT) \
+ ? FASTEST_ALIGNMENT \
+ : (ALIGN))
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+/* On the EPIPHANY the lower address bits are masked to 0 as necessary. The chip
+ won't croak when given an unaligned address, but the insn will still fail
+ to produce the correct result. */
+#define STRICT_ALIGNMENT 1
+
+/* layout_type overrides our ADJUST_ALIGNMENT settings from epiphany-modes.def
+ for vector modes, so we have to override it back. */
+#define ROUND_TYPE_ALIGN(TYPE, MANGLED_ALIGN, SPECIFIED_ALIGN) \
+ (TREE_CODE (TYPE) == VECTOR_TYPE && !TYPE_USER_ALIGN (TYPE) \
+ && SPECIFIED_ALIGN <= GET_MODE_ALIGNMENT (TYPE_MODE (TYPE)) \
+ ? GET_MODE_ALIGNMENT (TYPE_MODE (TYPE)) \
+ : ((TREE_CODE (TYPE) == RECORD_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE \
+ || TREE_CODE (TYPE) == QUAL_UNION_TYPE) \
+ && !TYPE_PACKED (TYPE)) \
+ ? epiphany_special_round_type_align ((TYPE), (MANGLED_ALIGN), \
+ (SPECIFIED_ALIGN)) \
+ : MAX ((MANGLED_ALIGN), (SPECIFIED_ALIGN)))
+
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
+ epiphany_adjust_field_align((FIELD), (COMPUTED))
+
+/* Layout of source language data types. */
+
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 0
+
+#define SIZE_TYPE "long unsigned int"
+#define PTRDIFF_TYPE "long int"
+#define WCHAR_TYPE "unsigned int"
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers. */
+
+#define FIRST_PSEUDO_REGISTER 78
+
+
+/* General purpose registers. */
+#define GPR_FIRST 0 /* First gpr */
+
+#define PIC_REGNO (GPR_FIRST + 28) /* PIC register. */
+#define GPR_LAST (GPR_FIRST + 63) /* Last gpr */
+#define CORE_CONTROL_FIRST CONFIG_REGNUM
+#define CORE_CONTROL_LAST IRET_REGNUM
+
+#define GPR_P(R) IN_RANGE (R, GPR_FIRST, GPR_LAST)
+#define GPR_OR_AP_P(R) (GPR_P (R) || (R) == ARG_POINTER_REGNUM)
+
+#define GPR_OR_PSEUDO_P(R) (GPR_P (R) || (R) >= FIRST_PSEUDO_REGISTER)
+#define GPR_AP_OR_PSEUDO_P(R) (GPR_OR_AP_P (R) || (R) >= FIRST_PSEUDO_REGISTER)
+
+#define FIXED_REGISTERS \
+{ /* Integer Registers */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 000-007, gr0 - gr7 */ \
+ 0, 0, 0, 0, 0, 1, 0, 0, /* 008-015, gr8 - gr15 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 016-023, gr16 - gr23 */ \
+ 0, 0, 0, 0, 1, 1, 1, 1, /* 024-031, gr24 - gr31 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 032-039, gr32 - gr39 */ \
+ 1, 1, 1, 1, 0, 0, 0, 0, /* 040-047, gr40 - gr47 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 048-055, gr48 - gr55 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 056-063, gr56 - gr63 */ \
+ /* Other registers */ \
+ 1, /* 64 AP - fake arg ptr */ \
+ 1, /* soft frame pointer */ \
+ 1, /* CC_REGNUM - integer conditions */\
+ 1, /* CCFP_REGNUM - fp conditions */\
+ 1, 1, 1, 1, 1, 1, /* Core Control Registers. */ \
+ 1, 1, 1, /* FP_{NEAREST,...}_REGNUM */\
+ 1, /* UNKNOWN_REGNUM - placeholder. */\
+}
+
+/* Like `FIXED_REGISTERS' but has 1 for each register that is clobbered (in
+ general) by function calls as well as for fixed registers. This macro
+ therefore identifies the registers that are not available for general
+ allocation of values that must live across function calls.
+
+ If a register has 0 in `CALL_USED_REGISTERS', the compiler automatically
+ saves it on function entry and restores it on function exit, if the register
+ is used within the function. */
+
+#define CALL_USED_REGISTERS \
+{ /* Integer Registers */ \
+ 1, 1, 1, 1, 0, 0, 0, 0, /* 000-007, gr0 - gr7 */ \
+ 0, 0, 0, 0, 1, 1, 1, 0, /* 008-015, gr8 - gr15 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* 016-023, gr16 - gr23 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* 024-031, gr24 - gr31 */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 032-039, gr32 - gr39 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* 040-047, gr40 - gr47 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* 048-055, gr48 - gr55 */ \
+ 1, 1, 1, 1, 1, 1, 1, 1, /* 056-063, gr56 - gr63 */ \
+ 1, /* 64 AP - fake arg ptr */ \
+ 1, /* soft frame pointer */ \
+ 1, /* 66 CC_REGNUM */ \
+ 1, /* 67 CCFP_REGNUM */ \
+ 1, 1, 1, 1, 1, 1, /* Core Control Registers. */ \
+ 1, 1, 1, /* FP_{NEAREST,...}_REGNUM */\
+ 1, /* UNKNOWN_REGNUM - placeholder. */\
+}
+
+#define REG_ALLOC_ORDER \
+ { \
+ 0, 1, 2, 3, /* Caller-saved 'small' registers. */ \
+ 12, /* Caller-saved unpaired register. */ \
+ /* Caller-saved registers. */ \
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, \
+ 44, 45, 46, 47, \
+ 48, 49, 50, 51, 52, 53, 54, 55, \
+ 56, 57, 58, 59, 60, 61, 62, 63, \
+ 4, 5, 6, 7, /* Callee-saved 'small' registers. */ \
+ 15, /* Callee-saved unpaired register. */ \
+ 8, 9, 10, 11, /* Callee-saved registers. */ \
+ 32, 33, 34, 35, 36, 37, 38, 39, \
+ 14, 13, /* Link register, stack pointer. */ \
+ 40, 41, 42, 43, /* Usually constant, but might be made callee-saved. */ \
+ /* Can't allocate, but must name these... */ \
+ 28, 29, 30, 31, \
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77 \
+ }
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. */
+extern const unsigned int epiphany_hard_regno_mode_ok[];
+extern unsigned int epiphany_mode_class[];
+#define HARD_REGNO_MODE_OK(REGNO, MODE) hard_regno_mode_ok((REGNO), (MODE))
+
+/* A C expression that is nonzero if it is desirable to choose
+ register allocation so as to avoid move instructions between a
+ value of mode MODE1 and a value of mode MODE2.
+
+ If `HARD_REGNO_MODE_OK (R, MODE1)' and `HARD_REGNO_MODE_OK (R,
+ MODE2)' are ever different for any R, then `MODES_TIEABLE_P (MODE1,
+ MODE2)' must be zero. */
+
+#define MODES_TIEABLE_P(MODE1, MODE2) 1
+
+/* Register classes and constants. */
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union.
+
+ It is important that any condition codes have class NO_REGS.
+ See `register_operand'. */
+
+enum reg_class {
+ NO_REGS,
+ LR_REGS,
+ SHORT_INSN_REGS,
+ SIBCALL_REGS,
+ GENERAL_REGS,
+ CORE_CONTROL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "LR_REGS", \
+ "SHORT_INSN_REGS", \
+ "SIBCALL_REGS", \
+ "GENERAL_REGS", \
+ "CORE_CONTROL_REGS", \
+ "ALL_REGS" \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS \
+{ /* r0-r31 r32-r63 ap/sfp/cc1/cc2/iret/status */ \
+ { 0x00000000,0x00000000,0x0}, /* NO_REGS */ \
+ { 0x00004000,0x00000000,0x0}, /* LR_REGS */ \
+ { 0x000000ff,0x00000000,0x0}, /* SHORT_INSN_REGS */ \
+ { 0xffff100f,0xffffff00,0x0}, /* SIBCALL_REGS */ \
+ { 0xffffffff,0xffffffff,0x0003}, /* GENERAL_REGS */ \
+ { 0x00000000,0x00000000,0x03f0}, /* CORE_CONTROL_REGS */ \
+ { 0xffffffff,0xffffffff,0x3fff}, /* ALL_REGS */ \
+}
+
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+extern enum reg_class epiphany_regno_reg_class[FIRST_PSEUDO_REGISTER];
+#define REGNO_REG_CLASS(REGNO) \
+(epiphany_regno_reg_class[REGNO])
+
+/* The class value for index registers, and the one for base regs. */
+#define BASE_REG_CLASS GENERAL_REGS
+#define INDEX_REG_CLASS GENERAL_REGS
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+((REGNO) < FIRST_PSEUDO_REGISTER || (unsigned) reg_renumber[REGNO] < FIRST_PSEUDO_REGISTER)
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+((REGNO) < FIRST_PSEUDO_REGISTER || (unsigned) reg_renumber[REGNO] < FIRST_PSEUDO_REGISTER)
+
+
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+(CLASS)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* The letters I, J, K, L, M, N, O, P in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C. */
+
+/* 'I' is used for 16 bit unsigned.
+ 'Cal' is used for long immediates (32 bits)
+ 'K' is used for any constant up to 5 bits.
+ 'L' is used for any 11 bit signed.
+*/
+
+#define IMM16(X) (IN_RANGE ((X), 0, 0xFFFF))
+#define SIMM16(X) (IN_RANGE ((X), -65536, 65535))
+#define SIMM11(X) (IN_RANGE ((X), -1024, 1023))
+#define IMM5(X) (IN_RANGE ((X), 0, 0x1F))
+
+typedef struct GTY (()) machine_function /* Per-function backend state; reached via (fun)->machine. */
+{
+ unsigned args_parsed : 1; /* Argument list has already been scanned. */
+ unsigned pretend_args_odd : 1; /* Pretend-args area is an odd number of words; FIRST_PARM_OFFSET adds 4 when set. */
+ unsigned lr_clobbered : 1; /* NOTE(review): presumably LR is clobbered and needs saving - confirm in epiphany.c. */
+ unsigned control_use_inserted : 1;
+ unsigned sw_entities_processed : 6; /* Assumed: bitmask of mode-switching entities handled - confirm. */
+ long lr_slot_offset; /* Stack offset of the LR save slot - TODO confirm against epiphany.c. */
+ rtx and_mask; /* Mask constants for the AND / OR mode-switching entities */
+ rtx or_mask; /* (see the comment above NUM_MODES_FOR_MODE_SWITCHING). */
+ unsigned unknown_mode_uses; /* Counters related to FP_MODE_ROUND_UNKNOWN resolution - */
+ unsigned unknown_mode_sets; /* assumed; confirm in resolve-sw-modes.c. */
+} machine_function_t;
+
+#define MACHINE_FUNCTION(fun) (fun)->machine
+
+#define INIT_EXPANDERS epiphany_init_expanders ()
+
+/* Stack layout and stack pointer usage. */
+
+/* Define this macro if pushing a word onto the stack moves the stack
+ pointer to a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this to nonzero if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET epiphany_stack_offset
+
+/* Offset from the stack pointer register to the first location at which
+ outgoing arguments are placed. */
+#define STACK_POINTER_OFFSET epiphany_stack_offset
+
+/* Offset of first parameter from the argument pointer register value. */
+/* 4 bytes for each of previous fp, return address, and previous gp.
+ 4 byte reserved area for future considerations. */
+#define FIRST_PARM_OFFSET(FNDECL) \
+ (epiphany_stack_offset \
+ + (MACHINE_FUNCTION (DECL_STRUCT_FUNCTION (FNDECL))->pretend_args_odd \
+ ? 4 : 0))
+
+#define INCOMING_FRAME_SP_OFFSET epiphany_stack_offset
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM GPR_SP
+
+/* Base register for access to local variables of the function. */
+#define HARD_FRAME_POINTER_REGNUM GPR_FP
+
+/* Register in which static-chain is passed to a function. This must
+ not be a register used by the prologue. */
+#define STATIC_CHAIN_REGNUM GPR_IP
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+
+#define ELIMINABLE_REGS \
+{{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+}
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ ((OFFSET) = epiphany_initial_elimination_offset ((FROM), (TO)))
+
+/* Function argument passing. */
+
+/* If defined, the maximum amount of space required for outgoing
+ arguments will be computed and placed into the variable
+ `current_function_outgoing_args_size'. No space will be pushed
+ onto the stack for each call; instead, the function prologue should
+ increase the stack frame size by this amount. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go. */
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+((CUM) = 0)
+
+/* The number of registers used for parameter passing. Local to this file. */
+#define MAX_EPIPHANY_PARM_REGS 4
+
+/* 1 if N is a possible register number for function argument passing. */
+#define FUNCTION_ARG_REGNO_P(N) \
+((unsigned) (N) < MAX_EPIPHANY_PARM_REGS)
+
+/* Return boolean indicating arg of type TYPE and mode MODE will be passed in
+ a reg. This includes arguments that have to be passed by reference as the
+ pointer to them is passed in a reg if one is available (and that is what
+ we're given).
+ This macro is only used in this file. */
+/* We must use partial argument passing because of the chosen mode
+ of varargs handling. */
+#define PASS_IN_REG_P(CUM, MODE, TYPE) \
+ (ROUND_ADVANCE_CUM ((CUM), (MODE), (TYPE)) < MAX_EPIPHANY_PARM_REGS)
+
+/* Tell GCC to use TARGET_RETURN_IN_MEMORY. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+#define EXIT_IGNORE_STACK 1
+
+#define EPILOGUE_USES(REGNO) epiphany_epilogue_uses (REGNO)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+#define FUNCTION_PROFILER(FILE, LABELNO)
+
+/* Given an rtx for the frame pointer,
+ return an rtx for the address of the frame. */
+#define FRAME_ADDR_RTX(frame) \
+ ((frame) == hard_frame_pointer_rtx ? arg_pointer_rtx : NULL)
+
+/* This is not only for dwarf unwind info, but also for the benefit of
+ df-scan.c to tell it that LR is live at the function start. */
+#define INCOMING_RETURN_ADDR_RTX \
+ gen_rtx_REG (Pmode, \
+ (current_function_decl != NULL \
+ && epiphany_is_interrupt_p (current_function_decl) \
+ ? IRET_REGNUM : GPR_LR))
+
+/* However, we haven't implemented the rest needed for dwarf2 unwind info. */
+#define DWARF2_UNWIND_INFO 0
+
+#define RETURN_ADDR_RTX(count, frame) \
+ (count ? NULL_RTX \
+ : gen_rtx_UNSPEC (SImode, gen_rtvec (1, const0_rtx), UNSPEC_RETURN_ADDR))
+
+/* Trampolines.
+ An epiphany trampoline looks like this:
+ mov r16,%low(fnaddr)
+ movt r16,%high(fnaddr)
+ mov ip,%low(cxt)
+ movt ip,%high(cxt)
+ jr r16 */
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 20
+
+/* Addressing modes, and classification of registers for them. */
+
+/* Maximum number of registers that can appear in a valid memory address. */
+#define MAX_REGS_PER_ADDRESS 2
+
+/* We have post_modify (load/store with update). */
+#define HAVE_POST_INCREMENT TARGET_POST_INC
+#define HAVE_POST_DECREMENT TARGET_POST_INC
+#define HAVE_POST_MODIFY_DISP TARGET_POST_MODIFY
+#define HAVE_POST_MODIFY_REG TARGET_POST_MODIFY
+
+/* Recognize any constant value that is a valid address. */
+#define CONSTANT_ADDRESS_P(X) \
+(GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST)
+
+#define RTX_OK_FOR_OFFSET_P(MODE, X) \
+ RTX_OK_FOR_OFFSET_1 (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT \
+ && epiphany_vect_align == 4 ? SImode : (MODE), X)
+#define RTX_OK_FOR_OFFSET_1(MODE, X) \
+ (GET_CODE (X) == CONST_INT \
+ && !(INTVAL (X) & (GET_MODE_SIZE (MODE) - 1)) \
+ && INTVAL (X) >= -2047 * (int) GET_MODE_SIZE (MODE) \
+ && INTVAL (X) <= 2047 * (int) GET_MODE_SIZE (MODE))
+
+/* Frame offsets cannot be evaluated till the frame pointer is eliminated. */
+#define RTX_FRAME_OFFSET_P(X) \
+ ((X) == frame_pointer_rtx \
+ || (GET_CODE (X) == PLUS && XEXP ((X), 0) == frame_pointer_rtx \
+ && CONST_INT_P (XEXP ((X), 1))))
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison. */
+#define SELECT_CC_MODE(OP, X, Y) \
+ epiphany_select_cc_mode (OP, X, Y)
+
+/* Return nonzero if SELECT_CC_MODE will never return MODE for a
+ floating point inequality comparison. */
+
+#define REVERSE_CONDITION(CODE, MODE) \
+ ((MODE) == CC_FPmode || (MODE) == CC_FP_EQmode || (MODE) == CC_FP_GTEmode \
+ || (MODE) == CC_FP_ORDmode || (MODE) == CC_FP_UNEQmode \
+ ? reverse_condition_maybe_unordered (CODE) \
+ : (MODE) == CCmode ? reverse_condition (CODE) \
+ : UNKNOWN)
+
+/* We can reverse all CCmodes with REVERSE_CONDITION. */
+#define REVERSIBLE_CC_MODE(MODE) \
+ ((MODE) == CCmode || (MODE) == CC_FPmode || (MODE) == CC_FP_EQmode \
+ || (MODE) == CC_FP_GTEmode || (MODE) == CC_FP_ORDmode \
+ || (MODE) == CC_FP_UNEQmode)
+
+/* Costs. */
+
+/* The cost of a branch insn. */
+/* ??? What's the right value here? Branches are certainly more
+ expensive than reg->reg moves. */
+#define BRANCH_COST(speed_p, predictable_p) \
+ (speed_p ? epiphany_branch_cost : 1)
+
+/* Nonzero if access to memory by bytes is slow and undesirable.
+ For RISC chips, it means that access to memory by bytes is no
+ better than access by words when possible, so grab a whole word
+ and maybe make use of that. */
+#define SLOW_BYTE_ACCESS 1
+
+/* Define this macro if it is as good or better to call a constant
+ function address than to call an address kept in a register. */
+/* On the EPIPHANY, calling through registers is slow. */
+#define NO_FUNCTION_CSE
+
+/* Section selection. */
+/* WARNING: These section names also appear in dwarf2out.c. */
+
+#define TEXT_SECTION_ASM_OP "\t.section .text"
+#define DATA_SECTION_ASM_OP "\t.section .data"
+
+#undef READONLY_DATA_SECTION_ASM_OP
+#define READONLY_DATA_SECTION_ASM_OP "\t.section .rodata"
+
+#define BSS_SECTION_ASM_OP "\t.section .bss"
+
+/* Define this macro if jump tables (for tablejump insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used.
+ This macro is irrelevant if there is no separate readonly data section. */
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
+
+/* PIC */
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. In some cases this register is defined by a
+ processor's ``application binary interface'' (ABI). When this macro
+ is defined, RTL is generated for this register once, as with the stack
+ pointer and frame pointer registers. If this macro is not defined, it
+ is up to the machine-dependent files to allocate such a register (if
+ necessary). */
+#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? PIC_REGNO : INVALID_REGNUM)
+
+/* Control the assembler format that we output. */
+
+/* A C string constant describing how to begin a comment in the target
+ assembler language. The compiler assumes that the comment will
+ end at the end of the line. */
+#define ASM_COMMENT_START ";"
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+#define ASM_APP_ON ""
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+#define ASM_APP_OFF ""
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\t.global\t"
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "fp", "ip", "sp", "lr", "r15", \
+ "r16", "r17","r18", "r19", "r20", "r21", "r22", "r23", \
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", \
+ "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39", \
+ "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", \
+ "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55", \
+ "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63", \
+ "ap", "sfp", "cc1", "cc2", \
+ "config", "status", "lc", "ls", "le", "iret", \
+ "fp_near", "fp_trunc", "fp_anyfp", "unknown" \
+}
+
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ epiphany_final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+#define LOCAL_LABEL_PREFIX "."
+
+/* A C expression which evaluates to true if CODE is a valid
+ punctuation character for use in the `PRINT_OPERAND' macro. */
+extern char epiphany_punct_chars[256];
+#define PRINT_OPERAND_PUNCT_VALID_P(CHAR) \
+ epiphany_punct_chars[(unsigned char) (CHAR)]
+
+/* This is how to output an element of a case-vector that is absolute. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+do { \
+ if (CASE_VECTOR_MODE == Pmode) \
+ asm_fprintf ((FILE), "\t.word %LL%d\n", (VALUE)); \
+ else \
+ asm_fprintf ((FILE), "\t.short %LL%d\n", (VALUE)); \
+} while (0)
+
+/* This is how to output an element of a case-vector that is relative. */
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+do { \
+ if (CASE_VECTOR_MODE == Pmode) \
+ asm_fprintf ((FILE), "\t.word"); \
+ else \
+ asm_fprintf ((FILE), "\t.short"); \
+ asm_fprintf ((FILE), " %LL%d-%LL%d\n", (VALUE), (REL)); \
+} while (0)
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+do { if ((LOG) != 0) fprintf (FILE, "\t.balign %d\n", 1 << (LOG)); } while (0)
+
+/* Debugging information. */
+
+/* Generate DBX and DWARF debugging information. */
+#define DBX_DEBUGGING_INFO 1
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* Turn off splitting of long stabs. */
+#define DBX_CONTIN_LENGTH 0
+
+/* Miscellaneous. */
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE (TARGET_SMALL16 && optimize_size ? HImode : Pmode)
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, UNKNOWN if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 8
+
+/* Define this to be nonzero if shift instructions ignore all but the low-order
+ few bits. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+
+#define Pmode SImode
+
+/* A function address in a call instruction. */
+#define FUNCTION_MODE SImode
+
+/* EPIPHANY function types. */
+enum epiphany_function_type
+{
+ EPIPHANY_FUNCTION_UNKNOWN, EPIPHANY_FUNCTION_NORMAL,
+ /* These are interrupt handlers. The name corresponds to the register
+ name that contains the return address. */
+ EPIPHANY_FUNCTION_ILINK1, EPIPHANY_FUNCTION_ILINK2,
+ /* These are interrupt handlers. The name corresponds to which type
+ of interrupt handler we're dealing with. */
+ EPIPHANY_FUNCTION_RESET, EPIPHANY_FUNCTION_SOFTWARE_EXCEPTION,
+ EPIPHANY_FUNCTION_TIMER, EPIPHANY_FUNCTION_DMA0,
+ EPIPHANY_FUNCTION_DMA1, EPIPHANY_FUNCTION_STATIC_FLAG,
+ EPIPHANY_FUNCTION_SWI
+};
+
+#define EPIPHANY_INTERRUPT_P(TYPE) \
+ ((TYPE) >= EPIPHANY_FUNCTION_RESET && (TYPE) <= EPIPHANY_FUNCTION_SWI)
+
+/* Compute the type of a function from its DECL. */
+
+#define IMMEDIATE_PREFIX "#"
+
+#define OPTIMIZE_MODE_SWITCHING(ENTITY) \
+ (epiphany_optimize_mode_switching (ENTITY))
+
+/* We have two fake entities for lazy code motion of the mask constants,
+ one entity each for round-to-nearest / truncating
+ with a different idea what FP_MODE_ROUND_UNKNOWN will be, and
+ finally an entity that runs in a second mode switching pass to
+ resolve FP_MODE_ROUND_UNKNOWN. */
+#define NUM_MODES_FOR_MODE_SWITCHING \
+ { 2, 2, FP_MODE_NONE, FP_MODE_NONE, FP_MODE_NONE, FP_MODE_NONE, FP_MODE_NONE }
+
+#define MODE_NEEDED(ENTITY, INSN) epiphany_mode_needed((ENTITY), (INSN))
+
+#define MODE_PRIORITY_TO_MODE(ENTITY, N) \
+ (epiphany_mode_priority_to_mode ((ENTITY), (N)))
+
+#define EMIT_MODE_SET(ENTITY, MODE, HARD_REGS_LIVE) \
+ emit_set_fp_mode ((ENTITY), (MODE), (HARD_REGS_LIVE))
+
+#define MODE_ENTRY(ENTITY) (epiphany_mode_entry_exit ((ENTITY), false))
+#define MODE_EXIT(ENTITY) (epiphany_mode_entry_exit ((ENTITY), true))
+#define MODE_AFTER(LAST_MODE, INSN) \
+ (epiphany_mode_after (e, (LAST_MODE), (INSN)))
+
+#define TARGET_INSERT_MODE_SWITCH_USE epiphany_insert_mode_switch_use
+
+/* Mode switching entities. */
+enum
+{
+ EPIPHANY_MSW_ENTITY_AND,
+ EPIPHANY_MSW_ENTITY_OR,
+ EPIPHANY_MSW_ENTITY_NEAREST,
+ EPIPHANY_MSW_ENTITY_TRUNC,
+ EPIPHANY_MSW_ENTITY_ROUND_UNKNOWN,
+ EPIPHANY_MSW_ENTITY_ROUND_KNOWN,
+ EPIPHANY_MSW_ENTITY_FPU_OMNIBUS
+};
+
+extern int epiphany_normal_fp_rounding;
+extern struct rtl_opt_pass pass_mode_switch_use;
+extern struct rtl_opt_pass pass_resolve_sw_modes;
+
+/* This will need to be adjusted when FP_CONTRACT_ON is properly
+ implemented. */
+#define TARGET_FUSED_MADD (flag_fp_contract_mode == FP_CONTRACT_FAST)
+
+#endif /* !GCC_EPIPHANY_H */
diff --git a/gcc/config/epiphany/epiphany.md b/gcc/config/epiphany/epiphany.md
new file mode 100644
index 00000000000..c8354e8eddb
--- /dev/null
+++ b/gcc/config/epiphany/epiphany.md
@@ -0,0 +1,2447 @@
+;; Machine description of the Adapteva Epiphany cpu for GNU C compiler
+;; Copyright (C) 1994, 1997, 1998, 1999, 2000, 2004, 2005, 2007, 2009, 2010,
+;; 2011 Free Software Foundation, Inc.
+;; Contributed by Embecosm on behalf of Adapteva, Inc.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; See file "rtl.def" for documentation on define_insn, match_*, et. al.
+
+;; Register numbers (both hard registers and fake/eliminable ones) and
+;; the codes used for unspec / unspec_volatile operations in this file.
+(define_constants
+ [(GPR_0 0)
+ (GPR_FP 11)
+ (GPR_IP 12)
+ (GPR_SP 13)
+ (GPR_LR 14)
+ (GPR_16 16)
+ (GPR_18 18)
+ (GPR_20 20)
+ (ARG_POINTER_REGNUM 64)
+ (FRAME_POINTER_REGNUM 65)
+ (CC_REGNUM 66) ;; 66 or 17
+ (CCFP_REGNUM 67) ;; 67 or 18
+ (CONFIG_REGNUM 68)
+ (STATUS_REGNUM 69)
+ (LC_REGNUM 70)
+ (LS_REGNUM 71)
+ (LE_REGNUM 72)
+ (IRET_REGNUM 73)
+ (FP_NEAREST_REGNUM 74)
+ (FP_TRUNCATE_REGNUM 75)
+ (FP_ANYFP_REGNUM 76)
+ (UNKNOWN_REGNUM 77) ; used for addsi3_r and friends
+ ; We represent the return address as an unspec rather than a reg.
+ ; If we used a reg, we could use register elimination, but eliminating
+ ; to GPR_LR would make the latter visible to dataflow, thus making it
+ ; harder to determine when it must be saved.
+ (UNSPEC_RETURN_ADDR 0)
+ (UNSPEC_FP_MODE 1)
+
+ (UNSPECV_GID 0)
+ (UNSPECV_GIE 1)])
+
+;; Insn type. Used to default other attribute values.
+
+(define_attr "type"
+ "move,load,store,cmove,unary,compare,shift,mul,uncond_branch,branch,call,fp,fp_int,misc,sfunc,fp_sfunc,flow"
+ (const_string "misc"))
+
+;; Length (in # bytes)
+
+(define_attr "length" "" (const_int 4))
+
+;; The length here is the length of a single asm.
+
+(define_asm_attributes
+ [(set_attr "length" "4")
+ (set_attr "type" "misc")])
+
+;; pipeline model; so far we have only one.
+(define_attr "pipe_model" "epiphany" (const_string "epiphany"))
+
+;; Static rounding mode selected by -mround-nearest (TARGET_ROUND_NEAREST).
+(define_attr "rounding" "trunc,nearest"
+ (cond [(ne (symbol_ref "TARGET_ROUND_NEAREST") (const_int 0))
+ (const_string "nearest")]
+ (const_string "trunc")))
+
+;; FP control-register mode an insn needs; consumed by the mode-switching
+;; entities declared in epiphany.h.
+(define_attr "fp_mode" "round_unknown,round_nearest,round_trunc,int,caller,none"
+ (cond [(eq_attr "type" "fp,fp_sfunc")
+ (symbol_ref "(enum attr_fp_mode) epiphany_normal_fp_rounding")
+ (eq_attr "type" "call")
+ (symbol_ref "(enum attr_fp_mode) epiphany_normal_fp_mode")
+ (eq_attr "type" "fp_int")
+ (const_string "int")]
+ (const_string "none")))
+
+(include "epiphany-sched.md")
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; modes that are held in a single register, and hence, a word.
+(define_mode_iterator WMODE [SI SF HI QI V2HI V4QI])
+(define_mode_iterator WMODE2 [SI SF HI QI V2HI V4QI])
+
+;; modes that are held in two single registers
+(define_mode_iterator DWMODE [DI DF V2SI V2SF V4HI V8QI])
+
+;; Double-word mode made up of two single-word mode values.
+(define_mode_iterator DWV2MODE [V2SI V2SF])
+(define_mode_attr vmode_part [(V2SI "si") (V2SF "sf")])
+(define_mode_attr vmode_PART [(V2SI "SI") (V2SF "SF")])
+(define_mode_attr vmode_fp_type [(V2SI "fp_int") (V2SF "fp")])
+(define_mode_attr vmode_ccmode [(V2SI "CC") (V2SF "CC_FP")])
+(define_mode_attr vmode_cc [(V2SI "CC_REGNUM") (V2SF "CCFP_REGNUM")])
+
+;; Move instructions.
+
+;; Word-sized moves.  Sub-word vectors are funnelled through movsi;
+;; frame/arg pointer sources are redirected through move_frame so the
+;; flags clobber of the eventual add survives register elimination.
+(define_expand "mov<mode>"
+ [(set (match_operand:WMODE 0 "general_operand" "")
+ (match_operand:WMODE 1 "general_operand" ""))]
+ ""
+{
+ if (<MODE>mode == V4QImode || <MODE>mode == V2HImode)
+ {
+ operands[0] = simplify_gen_subreg (SImode, operands[0], <MODE>mode, 0);
+ operands[1] = simplify_gen_subreg (SImode, operands[1], <MODE>mode, 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ if (<MODE>mode == SImode
+ && (operands[1] == frame_pointer_rtx || operands[1] == arg_pointer_rtx))
+ {
+ rtx reg = operands[0];
+
+ if (!REG_P (reg))
+ reg = gen_reg_rtx (SImode);
+ emit_insn (gen_move_frame (reg, operands[1]));
+ operands[1] = reg;
+ if (operands[0] == reg)
+ DONE;
+ }
+})
+
+;; QImode moves: register/special-register moves plus byte load/store.
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "move_dest_operand" "=Rcs, r, r,r,m")
+ (match_operand:QI 1 "move_src_operand" "Rcs,rU16,Cal,m,r"))]
+;; ??? Needed?
+ "gpr_operand (operands[0], QImode)
+ || gpr_operand (operands[1], QImode)"
+ "@
+ mov %0,%1
+ mov %0,%1
+ mov %0,%1
+ ldrb %0,%1
+ strb %1,%0"
+ [(set_attr "type" "move,move,move,load,store")])
+
+;; HImode moves; wide constants are re-split as an SImode low/high pair
+;; after reload when -msplit-lohi is in effect.
+(define_insn_and_split "*movhi_insn"
+ [(set (match_operand:HI 0 "move_dest_operand" "=r, r,r,m")
+ (match_operand:HI 1 "move_src_operand""rU16,Cal,m,r"))]
+ "gpr_operand (operands[0], HImode)
+ || gpr_operand (operands[1], HImode)"
+ "@
+ mov %0,%1
+ mov %0,%%low(%1); %1
+ ldrh %0,%c1
+ strh %1,%c0"
+ "reload_completed && CONSTANT_P (operands[1])
+ && !satisfies_constraint_U16 (operands[1]) && TARGET_SPLIT_LOHI"
+ [(set (match_dup 2) (match_dup 3))]
+ "operands[2] = simplify_gen_subreg (SImode, operands[0], HImode, 0);
+ operands[3] = simplify_gen_subreg (SImode, operands[1], HImode, 0);"
+ [(set_attr "type" "move,move,load,store")])
+
+;; We use a special pattern for a move from the frame pointer to
+;; show the flag clobber that is needed when this move is changed
+;; to an add by register elimination.
+;; ??? A pseudo register might be equivalent to a function invariant,
+;; and thus placed by reload into reg_equiv_invariant; if the pseudo
+;; does not get a hard register, we then end up with the function
+;; invariant in its place, i.e. an unexpected clobber of the flags
+;; register.
+;;
+;; N.B. operand 1 is an operand so that reload will perform elimination.
+;;
+;; The post-reload pattern recognition and splitting is done in frame_move_1.
+(define_insn "move_frame"
+ [(set (match_operand:SI 0 "gpr_operand" "=r")
+ (match_operand:SI 1 "register_operand" "r"))
+ (clobber (reg:CC CC_REGNUM))]
+ "operands[1] == frame_pointer_rtx || operands[1] == arg_pointer_rtx"
+ "#")
+
+;; Set the upper 16 bits of a register from %high of a constant (movt).
+(define_insn "movsi_high"
+ [(set (match_operand:SI 0 "gpr_operand" "+r")
+ (ior:SI (and:SI (match_dup 0) (const_int 65535))
+ (high:SI (match_operand:SI 1 "move_src_operand" "i"))))]
+ ""
+ "movt %0, %%high(%1)"
+ [(set_attr "type" "move")
+ (set_attr "length" "4")])
+
+;; Set the lower 16 bits of a register from %low of a constant.
+;; NOTE(review): the (const_int 0) as the lo_sum base is unusual; the
+;; pair movsi_lo_sum / movsi_high together materialize a 32-bit constant.
+(define_insn "movsi_lo_sum"
+ [(set (match_operand:SI 0 "gpr_operand" "=r")
+ (lo_sum:SI (const_int 0)
+ (match_operand:SI 1 "move_src_operand" "i")))]
+ ""
+ "mov %0, %%low(%1)"
+ [(set_attr "type" "move")
+ (set_attr "length" "4")])
+
+;; SImode moves.  Alternatives: reg/imm16 mov, small-constant synthesis
+;; via add/lsl/lsr from the zero register, full 32-bit mov/movt pair,
+;; load, store, and special-register movfs/movts.  Wide constants are
+;; split into the lo_sum/high pair after reload under -msplit-lohi.
+(define_insn_and_split "*movsi_insn"
+ [(set (match_operand:SI 0 "move_dest_operand"
+ "= r, r, r, r, r, r, m, r, Rct")
+ (match_operand:SI 1 "move_src_operand"
+ "rU16Rra,Cm1,Cl1,Cr1,Cal,mSra,rRra,Rct,r"))]
+ "gpr_operand (operands[0], SImode)
+ || gpr_operand (operands[1], SImode)
+ || satisfies_constraint_Sra (operands[1])"
+{
+ switch (which_alternative)
+ {
+ case 0: return "mov %0,%1";
+ case 1: return "add %0,%-,(1+%1)";
+ case 2: operands[1] = GEN_INT (exact_log2 (-INTVAL (operands[1])));
+ return "lsl %0,%-,%1";
+ case 3: operands[1] = GEN_INT (32 - exact_log2 (INTVAL (operands[1]) + 1));
+ return "lsr %0,%-,%1";
+ case 4: return "mov %0,%%low(%1)\;movt %0,%%high(%1) ; %1";
+ case 5: return "ldr %0,%C1";
+ case 6: return "str %1,%C0";
+ case 7: return "movfs %0,%1";
+ case 8: return "movts %0,%1";
+ default: gcc_unreachable ();
+ }
+}
+ "reload_completed && CONSTANT_P (operands[1])
+ && !satisfies_constraint_U16 (operands[1])
+ && !satisfies_constraint_Cm1 (operands[1])
+ && !satisfies_constraint_Cl1 (operands[1])
+ && !satisfies_constraint_Cr1 (operands[1])
+ && TARGET_SPLIT_LOHI"
+ [(match_dup 2) (match_dup 3)]
+ "operands[2] = gen_movsi_lo_sum (operands[0], operands[1]);
+ operands[3] = gen_movsi_high (operands[0], operands[1]);"
+ [(set_attr "type" "move,misc,misc,misc,move,load,store,flow,flow")
+ (set_attr "length" "4,4,4,4,8,4,4,4,4")])
+
+;; Return address: if LR is still intact, read it directly ...
+(define_split
+ [(set (match_operand:SI 0 "nonimmediate_operand")
+ (unspec:SI [(const_int 0)] UNSPEC_RETURN_ADDR))]
+ "reload_completed && !MACHINE_FUNCTION (cfun)->lr_clobbered"
+ [(set (match_dup 0) (reg:SI GPR_LR))])
+
+;; ... otherwise reload it from its stack slot via reload_insi_ra.
+(define_split
+ [(set (match_operand:SI 0 "gpr_operand")
+ (unspec:SI [(const_int 0)] UNSPEC_RETURN_ADDR))]
+ "reload_completed"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ emit_insn (gen_reload_insi_ra (operands[0], operands[1]));
+ DONE;
+})
+
+;; Load the saved return address from the frame; the slot offset is
+;; recorded by the prologue code in MACHINE_FUNCTION.
+(define_expand "reload_insi_ra"
+ [(set (match_operand:SI 0 "gpr_operand" "r") (match_operand:SI 1 "" "Sra"))]
+ ""
+{
+ rtx addr
+ = (frame_pointer_needed ? hard_frame_pointer_rtx : stack_pointer_rtx);
+
+ addr = plus_constant (addr, MACHINE_FUNCTION (cfun)->lr_slot_offset);
+ operands[1] = gen_frame_mem (SImode, addr);
+})
+
+;; If the frame pointer elimination offset is zero, we'll use this pattern.
+;; Note that the splitter can accept any gpr in operands[1]; this is
+;; necessary, (e.g. for compile/20021015-1.c -O0,)
+;; because when register elimination cannot be done with the constant
+;; as an immediate operand of the add instruction, reload will resort to
+;; loading the constant into a reload register, using gen_add2_insn to add
+;; the stack pointer, and then use the reload register as new source in
+;; the move_frame pattern.
+(define_insn_and_split "*move_frame_1"
+ [(set (match_operand:SI 0 "gpr_operand" "=r")
+ (match_operand:SI 1 "gpr_operand" "r"))
+ (clobber (reg:CC CC_REGNUM))]
+ "(reload_in_progress || reload_completed)
+ && (operands[1] == stack_pointer_rtx
+ || operands[1] == hard_frame_pointer_rtx)"
+ "#"
+ "reload_in_progress || reload_completed"
+ [(set (match_dup 0) (match_dup 1))])
+
+;; Double-word (64-bit) and vector moves.
+(define_expand "mov<mode>"
+ [(set (match_operand:DWMODE 0 "general_operand" "")
+ (match_operand:DWMODE 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_MODE_CLASS (<MODE>mode) == MODE_VECTOR_INT
+ || GET_MODE_CLASS (<MODE>mode) == MODE_VECTOR_FLOAT)
+ {
+ if (epiphany_vect_align == 4 && TARGET_SPLIT_VECMOVE_EARLY)
+ {
+ rtx o0l, o0h, o1l, o1h;
+
+ o0l = simplify_gen_subreg (SImode, operands[0], <MODE>mode, 0);
+ o0h = simplify_gen_subreg (SImode, operands[0], <MODE>mode,
+ UNITS_PER_WORD);
+ o1l = simplify_gen_subreg (SImode, operands[1], <MODE>mode, 0);
+ o1h = simplify_gen_subreg (SImode, operands[1], <MODE>mode,
+ UNITS_PER_WORD);
+ ;; Order the two word moves so the first never clobbers a word
+ ;; the second still has to read.
+ if (reg_overlap_mentioned_p (o0l, o1h))
+ {
+ emit_move_insn (o0h, o1h);
+ emit_move_insn (o0l, o1l);
+ }
+ else
+ {
+ emit_move_insn (o0l, o1l);
+ emit_move_insn (o0h, o1h);
+ }
+ DONE;
+ }
+ /* lower_subreg has a tendency to muck up vectorized code.
+ To protect the wide memory accesses, we must use same-size
+ subregs. */
+ if (epiphany_vect_align != 4 /* == 8 */
+ && !reload_in_progress
+ && (GET_CODE (operands[0]) == MEM || GET_CODE (operands[1]) == MEM)
+ && (GET_CODE (operands[0]) != SUBREG
+ || (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[0])))
+ != GET_MODE_SIZE (<MODE>mode)
+ && GET_CODE (operands[1]) != SUBREG)))
+ {
+ operands[0]
+ = simplify_gen_subreg (DImode, operands[0], <MODE>mode, 0);
+ operands[1]
+ = simplify_gen_subreg (DImode, operands[1], <MODE>mode, 0);
+ emit_insn (gen_movdi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+}")
+
+;; Double-word move insn: ldrd/strd for 8-byte-aligned memory; otherwise
+;; (reg-reg, reg-const, or 4-byte-aligned memory) split into two word
+;; moves after reload.
+(define_insn_and_split "*mov<mode>_insn"
+ [(set (match_operand:DWMODE 0 "move_dest_operand" "=r, r,r,m")
+ (match_operand:DWMODE 1 "move_double_src_operand" "r,CalE,m,r"))]
+ "(gpr_operand (operands[0], <MODE>mode)
+ || gpr_operand (operands[1], <MODE>mode))"
+ "@
+ #
+ #
+ ldrd %0,%X1
+ strd %1,%X0"
+ "reload_completed
+ && ((!MEM_P (operands[0]) && !MEM_P (operands[1]))
+ || epiphany_vect_align == 4)"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+{
+ ;; For a post-modify access, do the high word first so the address
+ ;; update happens in the second (low-word) access; the first access
+ ;; then needs an explicit +UNITS_PER_WORD offset.
+ int word0 = 0, word1 = UNITS_PER_WORD;
+
+ if (post_modify_operand (operands[0], <MODE>mode)
+ || post_modify_operand (operands[1], <MODE>mode))
+ word0 = UNITS_PER_WORD, word1 = 0;
+
+ operands[2] = simplify_gen_subreg (SImode, operands[0], <MODE>mode, word0);
+ operands[3] = simplify_gen_subreg (SImode, operands[1], <MODE>mode, word0);
+ operands[4] = simplify_gen_subreg (SImode, operands[0], <MODE>mode, word1);
+ operands[5] = simplify_gen_subreg (SImode, operands[1], <MODE>mode, word1);
+ if (post_modify_operand (operands[0], <MODE>mode))
+ operands[2]
+ = change_address (operands[2], VOIDmode,
+ plus_constant (XEXP (XEXP (operands[0], 0), 0),
+ UNITS_PER_WORD));
+ if (post_modify_operand (operands[1], <MODE>mode))
+ operands[3]
+ = change_address (operands[3], VOIDmode,
+ plus_constant (XEXP (XEXP (operands[1], 0), 0),
+ UNITS_PER_WORD));
+}
+ [(set_attr "type" "move,move,load,store")
+ (set_attr "length" "8,16,4,4")])
+
+
+;; SFmode moves mirror SImode; constants split into lo/high after reload.
+(define_insn_and_split "*movsf_insn"
+ [(set (match_operand:SF 0 "move_dest_operand" "=r,r,r,m")
+ (match_operand:SF 1 "move_src_operand" "r,E,m,r"))]
+ "gpr_operand (operands[0], SFmode)
+ || gpr_operand (operands[1], SFmode)"
+ "@
+ mov %0,%1
+ mov %0,%%low(%1)\;movt %0,%%high(%1) ; %1
+ ldr %0,%C1
+ str %1,%C0"
+ "reload_completed && CONSTANT_P (operands[1]) && TARGET_SPLIT_LOHI"
+ [(set (match_dup 2) (match_dup 3))]
+ "operands[2] = simplify_gen_subreg (SImode, operands[0], SFmode, 0);
+ operands[3] = simplify_gen_subreg (SImode, operands[1], SFmode, 0);"
+ [(set_attr "type" "move,move,load,store")
+ (set_attr "length" "4,8,4,4")])
+
+;; During/after reload we cannot freely clobber the flags, so dispatch
+;; to the flag-preserving addsi3_r variant instead of addsi3_i.
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "add_reg_operand" "")
+ (plus:SI (match_operand:SI 1 "add_reg_operand" "")
+ (match_operand:SI 2 "add_operand" "")))]
+ ""
+ "
+{
+ if (reload_in_progress || reload_completed)
+ emit_insn (gen_addsi3_r (operands[0], operands[1], operands[2]))
+ else
+ emit_insn (gen_addsi3_i (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+;; Ordinary add; clobbers the integer flags.
+(define_insn "addsi3_i"
+ [(set (match_operand:SI 0 "add_reg_operand" "=r")
+ (plus:SI (match_operand:SI 1 "add_reg_operand" "%r")
+ (match_operand:SI 2 "add_operand" "rL")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "add %0,%1,%2"
+[(set_attr "type" "misc")])
+
+; We use a clobber of UNKNOWN_REGNUM here so that the peephole optimizers
+; can identify the unresolved flags clobber problem, and also to
+; avoid unwanted matches.
+;
+; At -O0 / -O1 we don't peephole all instances away. We could get better
+; debug unwinding through the emitted code if we added a splitter.
+(define_insn "addsi3_r"
+ [(set (match_operand:SI 0 "gpr_operand" "=r")
+ (plus:SI (match_operand:SI 1 "gpr_operand" "%r")
+ (match_operand:SI 2 "nonmemory_operand" "rCar")))
+ (clobber (reg:CC UNKNOWN_REGNUM))]
+ "reload_in_progress || reload_completed"
+{
+ ; Pick a scratch register that provably differs from all three operands:
+ ; each XOR flips one bit of 0x17 (r23) depending on the corresponding
+ ; operand's register number, so scratch differs from operand N in bit N.
+ int scratch = (0x17
+ ^ (true_regnum (operands[0]) & 1)
+ ^ (true_regnum (operands[1]) & 2)
+ ^ (true_regnum (operands[2]) & 4));
+ ; Save scratch, save STATUS into scratch, add, then restore both --
+ ; the net effect is an add that leaves the flags untouched.
+ asm_fprintf (asm_out_file, "\tstr r%d,[sp,#0]\n", scratch);
+ asm_fprintf (asm_out_file, "\tmovfs r%d,status\n", scratch);
+ output_asm_insn ("add %0,%1,%2", operands);
+ asm_fprintf (asm_out_file, "\tmovts status,r%d\n", scratch);
+ asm_fprintf (asm_out_file, "\tldr r%d,[sp,#0]\n", scratch);
+ return "";
+}
+ [(set_attr "length" "20")
+ (set_attr "type" "misc")])
+
+;; reload uses gen_addsi2 because it doesn't understand the need for
+;; the clobber.
+;; The following peephole2s try to rewrite each UNKNOWN_REGNUM-clobbering
+;; add (from addsi3_r) into a form with an honest CC_REGNUM clobber, or
+;; into a flag-free equivalent.
+
+;; Constant load followed by add: commute so the constant becomes the
+;; immediate of the add.
+(define_peephole2
+ [(set (match_operand:SI 0 "gpr_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))
+ (parallel [(set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (match_operand:SI 2 "gpr_operand")))
+ (clobber (reg:CC UNKNOWN_REGNUM))])]
+ "satisfies_constraint_L (operands[1])
+ || ((operands[2] == stack_pointer_rtx
+ || (operands[2] == hard_frame_pointer_rtx && frame_pointer_needed))
+ && !peep2_regno_dead_p (2, CC_REGNUM)
+ && satisfies_constraint_Car (operands[1]))"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (match_dup 2) (match_dup 1)))
+ (clobber (reg:CC UNKNOWN_REGNUM))])]
+ ;; FIXME:
+ ;; need this patch: http://gcc.gnu.org/ml/gcc-patches/2011-10/msg02819.html
+ ;; "peep2_rescan = true;"
+)
+
+;; A flag-setting insn directly before the add: swap the two, so the
+;; add may clobber CC_REGNUM (the flag result is recreated afterwards).
+(define_peephole2
+ [(match_parallel 5 ""
+ [(set (match_operand 3 "cc_operand" "") (match_operand 4 "" ""))])
+ (parallel [(set (match_operand:SI 0 "gpr_operand" "")
+ (plus:SI (match_operand:SI 1 "gpr_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC UNKNOWN_REGNUM))])]
+ "REGNO (operands[3]) == CC_REGNUM
+ && (gpr_operand (operands[2], SImode)
+ || satisfies_constraint_L (operands[2]))
+ && !reg_overlap_mentioned_p (operands[0], operands[5])
+ && !reg_set_p (operands[1], operands[5])
+ && !reg_set_p (operands[2], operands[5])"
+ [(parallel [(set (match_operand:SI 0 "gpr_operand" "")
+ (plus:SI (match_operand:SI 1 "gpr_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])
+ (match_dup 5)]
+ "")
+
+;; Flags are dead here: the add may clobber CC_REGNUM outright.
+(define_peephole2
+ [(parallel [(set (match_operand:SI 0 "gpr_operand" "")
+ (plus:SI (match_operand:SI 1 "gpr_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC UNKNOWN_REGNUM))])]
+ "peep2_regno_dead_p (1, CC_REGNUM)
+ && (gpr_operand (operands[2], SImode)
+ || satisfies_constraint_L (operands[2]))"
+ [(parallel [(set (match_operand:SI 0 "gpr_operand" "")
+ (plus:SI (match_operand:SI 1 "gpr_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))
+ (clobber (reg:CC CC_REGNUM))])]
+ "")
+
+;; dest = sp + n with live flags: use a post-modify store -- the address
+;; update performs the addition without touching the flags.
+;; NOTE(review): the stored value (SP) lands at the old address in
+;; operand 0's register -- presumably a harmless scratch location; confirm.
+(define_peephole2
+ [(parallel [(set (match_operand:SI 0 "gpr_operand" "")
+ (plus:SI (reg:SI GPR_SP)
+ (match_operand:SI 1 "nonmemory_operand" "")))
+ (clobber (reg:CC UNKNOWN_REGNUM))])]
+ "(REG_P (operands[1]) && !reg_overlap_mentioned_p (operands[0], operands[1]))
+ || RTX_OK_FOR_OFFSET_P (<MODE>mode, operands[1])"
+ [(set (match_dup 0) (reg:SI GPR_SP))
+ (set (mem:WMODE (post_modify (match_dup 0)
+ (plus:SI (match_dup 0) (match_dup 1))))
+ (reg:WMODE GPR_SP))]
+ "")
+
+
+
+;; dest = fp + n with live flags: same post-modify trick, but with a
+;; load into a scratch register (no store through the frame pointer).
+(define_peephole2
+ [(parallel [(set (match_operand:SI 0 "gpr_operand" "")
+ (plus:SI (reg:SI GPR_FP)
+ (match_operand:SI 1 "nonmemory_operand" "")))
+ (clobber (reg:CC UNKNOWN_REGNUM))])
+ (match_scratch:WMODE 2 "r")]
+ "frame_pointer_needed
+ && ((REG_P (operands[1])
+ && !reg_overlap_mentioned_p (operands[0], operands[1]))
+ || RTX_OK_FOR_OFFSET_P (<MODE>mode, operands[1]))"
+ [(set (match_dup 0) (reg:SI GPR_FP))
+ (set (match_dup 2)
+ (mem:WMODE (post_modify (match_dup 0)
+ (plus:SI (match_dup 0) (match_dup 1)))))]
+ "")
+
+;; Integer subtract; clobbers the flags.
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "gpr_operand" "=r")
+ (minus:SI (match_operand:SI 1 "add_reg_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rL")))
+ (clobber (reg:CC CC_REGNUM))]
+ ""
+ "sub %0,%1,%2"
+ [(set_attr "type" "misc")])
+
+; After mode-switching, floating point operations, fp_sfuncs and calls
+; must exhibit the use of the control register, lest the setting of the
+; control register could be deleted or moved. OTOH a use of a hard register
+; greatly confounds optimizers like the rtl loop optimizers or combine.
+; Therefore, we put an extra pass immediately after the mode switching pass
+; that inserts the USEs of the control registers, and sets a flag in struct
+; machine_function that float_operation can henceforth only match with that
+; USE.
+
+;; Addition
+(define_expand "addsf3"
+ [(parallel
+ [(set (match_operand:SF 0 "gpr_operand" "")
+ (plus:SF (match_operand:SF 1 "gpr_operand" "")
+ (match_operand:SF 2 "gpr_operand" "")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])])
+
+(define_insn "*addsf3_i"
+ [(match_parallel 3 "float_operation"
+ [(set (match_operand:SF 0 "gpr_operand" "=r")
+ (plus:SF (match_operand:SF 1 "gpr_operand" "%r")
+ (match_operand:SF 2 "gpr_operand" "r")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "fadd %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+;; Subtraction
+(define_expand "subsf3"
+ [(parallel
+ [(set (match_operand:SF 0 "gpr_operand" "")
+ (minus:SF (match_operand:SF 1 "gpr_operand" "")
+ (match_operand:SF 2 "gpr_operand" "")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])])
+
+(define_insn "*subsf3_i"
+ [(match_parallel 3 "float_operation"
+ [(set (match_operand:SF 0 "gpr_operand" "=r")
+ (minus:SF (match_operand:SF 1 "gpr_operand" "r")
+ (match_operand:SF 2 "gpr_operand" "r")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "fsub %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+;; Subtract variant that also produces the FP comparison result
+;; (difference and flags from the same fsub).
+(define_expand "subsf3_f"
+ [(parallel
+ [(set (reg:CC_FP CCFP_REGNUM)
+ (compare:CC_FP (match_operand:SF 1 "gpr_operand" "r")
+ (match_operand:SF 2 "gpr_operand" "r")))
+ (set (match_operand:SF 0 "gpr_operand" "=r")
+ (minus:SF (match_dup 1) (match_dup 2)))])]
+ "!TARGET_SOFT_CMPSF")
+
+(define_insn "*subsf3_f_i"
+ [(match_parallel 3 "float_operation"
+ [(set (reg:CC_FP CCFP_REGNUM)
+ (compare:CC_FP (match_operand:SF 1 "gpr_operand" "r")
+ (match_operand:SF 2 "gpr_operand" "r")))
+ (set (match_operand:SF 0 "gpr_operand" "=r")
+ (minus:SF (match_dup 1) (match_dup 2)))])]
+ "!TARGET_SOFT_CMPSF"
+ "fsub %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+; There is an fabs instruction, but it has longer latency.
+; Clear the sign bit instead: shift it out left, then shift back right.
+(define_expand "abssf2"
+ [(set (match_operand:SF 0 "gpr_operand" "")
+ (abs:SF (match_operand:SF 1 "gpr_operand" "")))]
+ ""
+ "
+{
+ rtx op1 = copy_to_mode_reg (SImode, simplify_gen_subreg (SImode, operands[1],
+ SFmode, 0));
+ rtx op0 = simplify_gen_subreg (SImode, operands[0], SFmode, 0);
+
+ emit_insn (gen_ashlsi3 (op1, op1, const1_rtx));
+ emit_insn (gen_lshrsi3 (op0, op1, const1_rtx));
+ DONE;
+}")
+
+;; Multiplication
+(define_expand "mulsf3"
+ [(parallel
+ [(set (match_operand:SF 0 "gpr_operand" "")
+ (mult:SF (match_operand:SF 1 "gpr_operand" "")
+ (match_operand:SF 2 "gpr_operand" "")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])])
+
+(define_insn "*mulsf3_i"
+ [(match_parallel 3 "float_operation"
+ [(set (match_operand:SF 0 "gpr_operand" "=r")
+ (mult:SF (match_operand:SF 1 "gpr_operand" "%r")
+ (match_operand:SF 2 "gpr_operand" "r")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "fmul %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+;; Division: only with -freciprocal-math, as x/y = x * (1/y) using the
+;; __fast_recipsf2 software function.
+(define_expand "divsf3"
+ [(set (match_operand:SF 0 "gpr_operand" "")
+ (div:SF (match_operand:SF 1 "gpr_operand" "")
+ (match_operand:SF 2 "gpr_operand" "")))]
+ "flag_reciprocal_math"
+{
+ rtx one = CONST1_RTX (SFmode);
+ rtx dst = operands[0];
+
+ ;; x/x is just 1 under -freciprocal-math's relaxed semantics.
+ if (rtx_equal_p (dst, operands[1]))
+ {
+ emit_move_insn (dst, one);
+ DONE;
+ }
+ else if (!register_operand (dst, SFmode) && can_create_pseudo_p ())
+ dst = gen_reg_rtx (SFmode);
+ emit_insn (gen_recipsf2 (dst, one, operands[2],
+ sfunc_symbol (\"__fast_recipsf2\")));
+ emit_insn (gen_mulsf3 (operands[0], operands[1], dst));
+ DONE;
+})
+
+;; Before reload, keep the hard reg usage to clobbers so that the loop
+;; optimizers can more easily move this insn.
+;; It would be nicer to use a constraint for a GPR_0 - only register class,
+;; but sched1 can still cause trouble then, and there is no guarantee of
+;; better register allocations.
+;; Neither is there when using the opposite strategy - putting explicit
+;; hard register references into pre-reload rtl.
+(define_expand "recipsf2"
+ [(parallel
+ [(set (match_operand:SF 0 "gpr_operand" "")
+ (div:SF (match_operand:SF 1 "const_float_1_operand" "")
+ (match_operand:SF 2 "move_src_operand" "")))
+ (use (match_operand:SI 3 "move_src_operand" ""))
+ (clobber (reg:SF 0))
+ (clobber (reg:SI 1))
+ (clobber (reg:SF GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:DI GPR_18))
+ (clobber (reg:SI GPR_20))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])])
+
+;; After reload, split into the sfunc calling sequence: argument in r0,
+;; function address in r1, result back from r0.
+(define_insn_and_split "*recipsf2_1"
+ [(match_parallel 4 "float_operation"
+ [(set (match_operand:SF 0 "gpr_operand" "=r,r")
+ (div:SF (match_operand:SF 1 "const_float_1_operand" "")
+ (match_operand:SF 2 "move_src_operand" "rU16m,rU16mCal")))
+ (use (match_operand:SI 3 "move_src_operand" "rU16m,rU16mCal"))
+ (clobber (reg:SF 0))
+ (clobber (reg:SI 1))
+ (clobber (reg:SF GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:DI GPR_18))
+ (clobber (reg:SI GPR_20))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ "flag_reciprocal_math"
+ "#"
+ "&& reload_completed"
+ [(set (reg:SI 1) (match_dup 3))
+ (set (reg:SF 0) (match_dup 2))
+ (parallel
+ [(set (reg:SF 0)
+ (div:SF (match_dup 1)
+ (reg:SF 0)))
+ (use (reg:SI 1))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:DI GPR_18))
+ (clobber (reg:SI GPR_20))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))
+ (match_dup 5)
+ (match_dup 6)])
+ (set (match_dup 0) (reg:SF 0))]
+ ;; Carry over the last two elements of the matched parallel (the
+ ;; mode-switching USEs appended by float_operation).
+ "operands[5] = XVECEXP (operands[4], 0, XVECLEN (operands[4], 0) - 2);
+ operands[6] = XVECEXP (operands[4], 0, XVECLEN (operands[4], 0) - 1);"
+ [(set_attr "type" "fp_sfunc")
+ (set_attr "length" "16,24")])
+
+;; The actual sfunc call: address in r1, argument/result in r0.
+(define_insn "*recipsf2_2"
+ [(match_parallel 1 "float_operation"
+ [(set (reg:SF 0)
+ (div:SF (match_operand:SF 0 "const_float_1_operand" "")
+ (reg:SF 0)))
+ (use (reg:SI 1))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:DI GPR_18))
+ (clobber (reg:SI GPR_20))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ "flag_reciprocal_math"
+ "jalr r1"
+ [(set_attr "type" "fp_sfunc")])
+
+
+;; Fused multiply-add
+(define_expand "fmasf4"
+ [(parallel
+ [(set (match_operand:SF 0 "gpr_operand" "")
+ (fma:SF (match_operand:SF 1 "gpr_operand" "")
+ (match_operand:SF 2 "gpr_operand" "")
+ (match_operand:SF 3 "gpr_operand" "")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ "")
+
+; The multiply operands are commutative, but since they have the
+; same constraints, there is no point in telling reload about this.
+(define_insn "*fmadd"
+ [(match_parallel 4 "float_operation"
+ [(set (match_operand:SF 0 "gpr_operand" "=r")
+ (fma:SF (match_operand:SF 1 "gpr_operand" "r")
+ (match_operand:SF 2 "gpr_operand" "r")
+ (match_operand:SF 3 "gpr_operand" "0")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "fmadd %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+; Once vectorization consistently works for this port, should check
+; if the fmadd / fmsub patterns still serve a purpose. With the
+; introduction of fma / fnma handling by the SSA optimizers,
+; at least scalars should be handled by these optimizers, would
+; have to see how well they do on vectors from auto-vectorization.
+;
+; combiner pattern, also used by vector combiner pattern
+(define_expand "maddsf"
+ [(parallel
+ [(set (match_operand:SF 0 "gpr_operand" "=r")
+ (plus:SF (mult:SF (match_operand:SF 1 "gpr_operand" "r")
+ (match_operand:SF 2 "gpr_operand" "r"))
+ (match_operand:SF 3 "gpr_operand" "0")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ "TARGET_FUSED_MADD")
+
+(define_insn "*maddsf_combine"
+ [(match_parallel 4 "float_operation"
+ [(set (match_operand:SF 0 "gpr_operand" "=r")
+ (plus:SF (mult:SF (match_operand:SF 1 "gpr_operand" "r")
+ (match_operand:SF 2 "gpr_operand" "r"))
+ (match_operand:SF 3 "gpr_operand" "0")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ "TARGET_FUSED_MADD"
+ "fmadd %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+;; Fused multiply-sub
+(define_expand "fnmasf4"
+ [(parallel
+ [(set (match_operand:SF 0 "gpr_operand" "")
+ (fma:SF (neg:SF (match_operand:SF 1 "gpr_operand" ""))
+ (match_operand:SF 2 "gpr_operand" "")
+ (match_operand:SF 3 "gpr_operand" "")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ "")
+
+(define_insn "*fmsub"
+ [(match_parallel 4 "float_operation"
+ [(set (match_operand:SF 0 "gpr_operand" "=r")
+ (fma:SF (neg:SF (match_operand:SF 1 "gpr_operand" "r"))
+ (match_operand:SF 2 "gpr_operand" "r")
+ (match_operand:SF 3 "gpr_operand" "0")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "fmsub %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+;; combiner form: acc - a*b, matched to fmsub under -ffp-contract=fast.
+(define_insn "*fmsub_combine"
+ [(match_parallel 4 "float_operation"
+ [(set (match_operand:SF 0 "gpr_operand" "=r")
+ (minus:SF (match_operand:SF 3 "gpr_operand" "0")
+ (mult:SF (match_operand:SF 1 "gpr_operand" "r")
+ (match_operand:SF 2 "gpr_operand" "r"))))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ "TARGET_FUSED_MADD"
+ "fmsub %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+;; float / integer conversions
+
+(define_expand "floatsisf2"
+ [(parallel
+ [(set (match_operand:SF 0 "gpr_operand" "")
+ (float:SF (match_operand:SI 1 "gpr_operand" "")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])])
+
+(define_insn "*floatsisf2_i"
+ [(match_parallel 2 "float_operation"
+ [(set (match_operand:SF 0 "gpr_operand" "=r")
+ (float:SF (match_operand:SI 1 "gpr_operand" "r")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "float %0, %1"
+ [(set_attr "type" "fp")])
+
+;; float that also leaves the result compared against zero in the FP flags.
+;; NOTE(review): operand 1 is declared SF although it holds the integer
+;; input -- presumably deliberate mode-punning; confirm before changing.
+(define_expand "floatsisf2_cmp"
+ [(parallel
+ [(set (reg:CC_FP CCFP_REGNUM)
+ (compare:CC_FP (float:SF (match_operand:SF 1 "gpr_operand" "r"))
+ (match_dup 2)))
+ (set (match_operand:SF 0 "gpr_operand" "=r")
+ (float:SF (match_dup 1)))])]
+ ""
+ "operands[2] = CONST0_RTX (SFmode);")
+
+(define_insn "*floatsisf2_cmp_i"
+ [(match_parallel 3 "float_operation"
+ [(set (reg:CC_FP CCFP_REGNUM)
+ (compare:CC_FP (float:SF (match_operand:SF 1 "gpr_operand" "r"))
+ (match_operand:SF 2 "const0_operand" "")))
+ (set (match_operand:SF 0 "gpr_operand" "=r")
+ (float:SF (match_dup 1)))])]
+ ""
+ "float %0, %1"
+ [(set_attr "type" "fp")])
+
+;; Unsigned int -> float: do a signed float, then patch up the result
+;; for inputs with the sign bit set (only valid in round-to-trunc mode).
+(define_expand "floatunssisf2"
+ [(set (match_operand:SF 0 "gpr_operand" "")
+ (float:SF (match_operand:SI 1 "gpr_operand" "")))]
+ "epiphany_normal_fp_rounding == /*FP_MODE_ROUND_TRUNC*/ 2"
+{
+ rtx cst = force_reg (SImode, gen_int_mode (0xb0800000, SImode));
+ rtx tmp = gen_reg_rtx (SImode);
+ rtx cmp = gen_rtx_GTU (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM), const0_rtx);
+
+ if (reg_overlap_mentioned_p (operands[0], operands[1]))
+ operands[1] = copy_to_mode_reg (SImode, operands[1]);
+ emit_insn (gen_floatsisf2 (operands[0], operands[1]));
+ emit_insn (gen_ashrsi3 (tmp, operands[1], GEN_INT (8)));
+ emit_insn (gen_sub_f (tmp, tmp, cst));
+ emit_insn (gen_movsfcc (operands[0], cmp,
+ simplify_gen_subreg (SFmode, tmp, SImode, 0),
+ operands[0]));
+ DONE;
+})
+
+(define_expand "fix_truncsfsi2"
+ [(parallel
+ [(set (match_operand:SI 0 "gpr_operand" "")
+ (fix:SI (match_operand:SF 1 "gpr_operand" "")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])])
+
+(define_insn "*fix_truncsfsi2_i"
+ [(match_parallel 2 "float_operation"
+ [(set (match_operand:SI 0 "gpr_operand" "=r")
+ (fix:SI (match_operand:SF 1 "gpr_operand" "r")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "fix %0, %1"
+ [(set_attr "type" "fp")
+ (set_attr "fp_mode" "round_trunc")])
+
+;; float -> unsigned int: signed fix plus a fix-up for values >= 2^31,
+;; with a branchless variant used when comparisons are cheap.
+(define_expand "fixuns_truncsfsi2"
+ [(set (match_operand:SI 0 "gpr_operand" "")
+ (unsigned_fix:SI (match_operand:SF 1 "gpr_operand" "")))]
+ ""
+{
+ if (reg_overlap_mentioned_p (operands[0], operands[1]))
+ operands[1] = copy_to_mode_reg (SImode, operands[1]);
+ if (TARGET_SOFT_CMPSF || optimize_function_for_speed_p (cfun))
+ {
+ rtx op1si;
+ /* By toggling what is to be bit31 before the shift, we get a chance to
+ use a short movt insn. */
+ rtx bit31 = force_reg (SImode, GEN_INT (0x800000));
+ rtx tmp = gen_reg_rtx (SImode);
+ rtx limit = force_reg (SImode, gen_int_mode (0x4f000000, SImode));
+ rtx cmp
+ = gen_rtx_GE (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM), const0_rtx);
+
+ op1si = simplify_gen_subreg (SImode, operands[1], SFmode, 0);
+ emit_insn (gen_fix_truncsfsi2 (operands[0], operands[1]));
+ emit_insn (gen_subsi3 (tmp, op1si, bit31));
+ emit_insn (gen_ashlsi3 (tmp, tmp, GEN_INT (8)));
+ emit_insn (gen_cmpsi_cc_insn (op1si, limit));
+ emit_insn (gen_movsicc (operands[0], cmp, tmp, operands[0]));
+ }
+ else
+ {
+ REAL_VALUE_TYPE offset;
+ rtx limit;
+ rtx tmp = gen_reg_rtx (SFmode);
+ rtx label = gen_label_rtx ();
+ rtx bit31;
+ rtx cc1 = gen_rtx_REG (CC_FPmode, CCFP_REGNUM);
+ rtx cmp = gen_rtx_LT (VOIDmode, cc1, CONST0_RTX (SFmode));
+
+ ;; limit = 2^31; values >= limit are fixed as (x - 2^31) then the
+ ;; sign bit is put back in with an xor.
+ real_2expN (&offset, 31, SFmode);
+ limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, SFmode);
+ limit = force_reg (SFmode, limit);
+ emit_insn (gen_fix_truncsfsi2 (operands[0], operands[1]));
+ emit_insn (gen_subsf3_f (tmp, operands[1], limit));
+ emit_jump_insn (gen_branch_insn (label, cmp, cc1));
+ bit31 = force_reg (SImode, gen_int_mode (0x80000000, SImode));
+ emit_insn (gen_fix_truncsfsi2 (operands[0], tmp));
+ emit_insn (gen_xorsi3 (operands[0], operands[0], bit31));
+ emit_label (label);
+ }
+ DONE;
+})
+
+(define_insn "*iadd"
+ [(match_parallel 3 "float_operation"
+ [(set (match_operand:SI 0 "gpr_operand" "=r")
+ (plus:SI (match_operand:SI 1 "gpr_operand" "%r")
+ (match_operand:SI 2 "gpr_operand" "r")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "iadd %0, %1, %2"
+ [(set_attr "type" "fp_int")])
+
+(define_insn "*isub"
+ [(match_parallel 3 "float_operation"
+ [(set (match_operand:SI 0 "gpr_operand" "=r")
+ (minus:SI (match_operand:SI 1 "gpr_operand" "r")
+ (match_operand:SI 2 "gpr_operand" "r")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "isub %0, %1, %2"
+ [(set_attr "type" "fp_int")])
+
+(define_expand "mulsi3"
+ [(parallel
+ [(set (match_operand:SI 0 "gpr_operand" "")
+ (mult:SI (match_operand:SI 1 "gpr_operand" "")
+ (match_operand:SI 2 "gpr_operand" "")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])])
+
+(define_insn "*imul"
+ [(match_parallel 3 "float_operation"
+ [(set (match_operand:SI 0 "gpr_operand" "=r")
+ (mult:SI (match_operand:SI 1 "gpr_operand" "%r")
+ (match_operand:SI 2 "gpr_operand" "r")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "imul %0, %1, %2"
+ [(set_attr "type" "fp_int")])
+
; combiner pattern, also used by vector combiner pattern
;; Integer multiply-add: op0 = op1 * op2 + op3, with op3 tied to op0
;; (constraint "0") to match the accumulating IMADD instruction.
(define_expand "maddsi"
  [(parallel
     [(set (match_operand:SI 0 "gpr_operand" "=r")
	   (plus:SI (mult:SI (match_operand:SI 1 "gpr_operand" "r")
			     (match_operand:SI 2 "gpr_operand" "r"))
		    (match_operand:SI 3 "gpr_operand" "0")))
      (clobber (reg:CC_FP CCFP_REGNUM))])]
  "")
+
+(define_insn "*maddsi_combine"
+ [(match_parallel 4 "float_operation"
+ [(set (match_operand:SI 0 "gpr_operand" "=r")
+ (plus:SI (mult:SI (match_operand:SI 1 "gpr_operand" "r")
+ (match_operand:SI 2 "gpr_operand" "r"))
+ (match_operand:SI 3 "gpr_operand" "0")))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "imsub %0, %1, %2"
+ [(set_attr "type" "fp_int")])
+
+(define_insn "*imsub"
+ [(match_parallel 4 "float_operation"
+ [(set (match_operand:SI 0 "gpr_operand" "=r")
+ (minus:SI (match_operand:SI 3 "gpr_operand" "0")
+ (mult:SI (match_operand:SI 1 "gpr_operand" "r")
+ (match_operand:SI 2 "gpr_operand" "r"))))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "imsub %0, %1, %2"
+ [(set_attr "type" "fp_int")])
+
+(define_expand "divsi3"
+ [(parallel
+ [(set (match_operand:SI 0 "move_dest_operand" "")
+ (div:SI (match_operand:SI 1 "move_src_operand" "")
+ (match_operand:SI 2 "move_src_operand" "")))
+ (use (match_dup 3))
+ (clobber (reg:SI 0))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:DI GPR_18))
+ (clobber (reg:SI GPR_20))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "operands[3] = sfunc_symbol (\"__divsi3\");")
+
+;; Before reload, keep the hard reg usage to clobbers so that the loop
+;; optimizers can more easily move this insn.
+(define_insn_and_split "*divsi3_1"
+ [(match_parallel 4 "float_operation"
+ [(set (match_operand:SI 0 "move_dest_operand" "=r,r")
+ (div:SI (match_operand:SI 1 "move_src_operand" "rU16m,rU16mCal")
+ (match_operand:SI 2 "move_src_operand" "rU16m,rU16mCal")))
+ (use (match_operand:SI 3 "call_address_operand" "Csy,r"))
+ (clobber (reg:SI 0))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:DI GPR_18))
+ (clobber (reg:SI GPR_20))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "#"
+ "&& reload_completed"
+ [(set (reg:SI 0) (match_dup 1))
+ (set (reg:SI 1) (match_dup 2))
+ (parallel
+ [(set (reg:SI 0) (div:SI (reg:SI 0) (reg:SI 1)))
+ (use (match_dup 3))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:DI GPR_18))
+ (clobber (reg:SI GPR_20))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))
+ (match_dup 5)
+ (match_dup 6)])
+ (set (match_dup 0) (reg:SI 0))]
+ "operands[5] = XVECEXP (operands[4], 0, XVECLEN (operands[4], 0) - 2);
+ operands[6] = XVECEXP (operands[4], 0, XVECLEN (operands[4], 0) - 1);"
+ [(set_attr "type" "fp_sfunc")
+ (set_attr "length" "16,24")])
+
+(define_insn "*divsi3_2"
+ [(match_parallel 1 "float_operation"
+ [(set (reg:SI 0) (div:SI (reg:SI 0) (reg:SI 1)))
+ (use (match_operand:SI 0 "call_address_operand" "Csy,r"))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:DI GPR_18))
+ (clobber (reg:SI GPR_20))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "%f0"
+ [(set_attr "type" "fp_sfunc")])
+
+(define_expand "udivsi3"
+ [(parallel
+ [(set (match_operand:SI 0 "move_dest_operand" "")
+ (udiv:SI (match_operand:SI 1 "move_src_operand" "")
+ (match_operand:SI 2 "move_src_operand" "")))
+ (use (match_dup 3))
+ (clobber (reg:SI 0))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:SI GPR_18))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "operands[3] = sfunc_symbol (\"__udivsi3\");")
+
+;; Before reload, keep the hard reg usage to clobbers so that the loop
+;; optimizers can more easily move this insn.
+(define_insn_and_split "*udivsi3_1"
+ [(match_parallel 4 "float_operation"
+ [(set (match_operand:SI 0 "move_dest_operand" "=r,r")
+ (udiv:SI (match_operand:SI 1 "move_src_operand" "rU16m,rU16mCal")
+ (match_operand:SI 2 "move_src_operand" "rU16m,rU16mCal")))
+ (use (match_operand:SI 3 "call_address_operand" "Csy,r"))
+ (clobber (reg:SI 0))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:SI GPR_18))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "#"
+ "&& reload_completed"
+ [(set (reg:SI 0) (match_dup 1))
+ (set (reg:SI 1) (match_dup 2))
+ (parallel
+ [(set (reg:SI 0) (udiv:SI (reg:SI 0) (reg:SI 1)))
+ (use (match_dup 3))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:SI GPR_18))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))
+ (match_dup 5)
+ (match_dup 6)])
+ (set (match_dup 0) (reg:SI 0))]
+ "operands[5] = XVECEXP (operands[4], 0, XVECLEN (operands[4], 0) - 2);
+ operands[6] = XVECEXP (operands[4], 0, XVECLEN (operands[4], 0) - 1);"
+ [(set_attr "type" "fp_sfunc")
+ (set_attr "length" "16,24")])
+
+(define_insn "*udivsi3_2"
+ [(match_parallel 1 "float_operation"
+ [(set (reg:SI 0) (udiv:SI (reg:SI 0) (reg:SI 1)))
+ (use (match_operand:SI 0 "call_address_operand" "Csy,r"))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:SI GPR_18))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "%f0"
+ [(set_attr "type" "fp_sfunc")])
+
+(define_expand "modsi3"
+ [(parallel
+ [(set (match_operand:SI 0 "move_dest_operand" "")
+ (mod:SI (match_operand:SI 1 "move_src_operand" "")
+ (match_operand:SI 2 "move_src_operand" "")))
+ (use (match_dup 3))
+ (clobber (reg:SI 0))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:DI GPR_18))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "operands[3] = sfunc_symbol (\"__modsi3\");")
+
+;; Before reload, keep the hard reg usage to clobbers so that the loop
+;; optimizers can more easily move this insn.
+(define_insn_and_split "*modsi3_1"
+ [(match_parallel 4 "float_operation"
+ [(set (match_operand:SI 0 "move_dest_operand" "=r,r")
+ (mod:SI (match_operand:SI 1 "move_src_operand" "rU16m,rU16mCal")
+ (match_operand:SI 2 "move_src_operand" "rU16m,rU16mCal")))
+ (use (match_operand:SI 3 "call_address_operand" "Csy,r"))
+ (clobber (reg:SI 0))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:DI GPR_18))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "#"
+ "&& reload_completed"
+ [(set (reg:SI 0) (match_dup 1))
+ (set (reg:SI 1) (match_dup 2))
+ (parallel
+ [(set (reg:SI 0) (mod:SI (reg:SI 0) (reg:SI 1)))
+ (use (match_dup 3))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:DI GPR_18))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))
+ (match_dup 5)
+ (match_dup 6)])
+ (set (match_dup 0) (reg:SI 0))]
+ "operands[5] = XVECEXP (operands[4], 0, XVECLEN (operands[4], 0) - 2);
+ operands[6] = XVECEXP (operands[4], 0, XVECLEN (operands[4], 0) - 1);"
+ [(set_attr "type" "fp_sfunc")
+ (set_attr "length" "16,24")])
+
+(define_insn "*modsi3_2"
+ [(match_parallel 1 "float_operation"
+ [(set (reg:SI 0) (mod:SI (reg:SI 0) (reg:SI 1)))
+ (use (match_operand:SI 0 "call_address_operand" "Csy,r"))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:DI GPR_18))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "%f0"
+ [(set_attr "type" "fp_sfunc")])
+
+(define_expand "umodsi3"
+ [(parallel
+ [(set (match_operand:SI 0 "move_dest_operand" "")
+ (umod:SI (match_operand:SI 1 "move_src_operand" "")
+ (match_operand:SI 2 "move_src_operand" "")))
+ (use (match_dup 3))
+ (clobber (reg:SI 0))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "operands[3] = sfunc_symbol (\"__umodsi3\");")
+
+;; Before reload, keep the hard reg usage to clobbers so that the loop
+;; optimizers can more easily move this insn.
+(define_insn_and_split "*umodsi3_1"
+ [(match_parallel 4 "float_operation"
+ [(set (match_operand:SI 0 "move_dest_operand" "=r,r")
+ (umod:SI (match_operand:SI 1 "move_src_operand" "rU16m,rU16mCal")
+ (match_operand:SI 2 "move_src_operand" "rU16m,rU16mCal")))
+ (use (match_operand:SI 3 "call_address_operand" "Csy,r"))
+ (clobber (reg:SI 0))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "#"
+ "&& reload_completed"
+ [(set (reg:SI 0) (match_dup 1))
+ (set (reg:SI 1) (match_dup 2))
+ (parallel
+ [(set (reg:SI 0) (umod:SI (reg:SI 0) (reg:SI 1)))
+ (use (match_dup 3))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))
+ (match_dup 5)
+ (match_dup 6)])
+ (set (match_dup 0) (reg:SI 0))]
+ "operands[5] = XVECEXP (operands[4], 0, XVECLEN (operands[4], 0) - 2);
+ operands[6] = XVECEXP (operands[4], 0, XVECLEN (operands[4], 0) - 1);"
+ [(set_attr "type" "fp_sfunc")
+ (set_attr "length" "16,24")])
+
+(define_insn "*umodsi3_2"
+ [(match_parallel 1 "float_operation"
+ [(set (reg:SI 0) (umod:SI (reg:SI 0) (reg:SI 1)))
+ (use (match_operand:SI 0 "call_address_operand" "Csy,r"))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI GPR_IP))
+ (clobber (reg:DI GPR_16))
+ (clobber (reg:SI GPR_LR))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (reg:CC_FP CCFP_REGNUM))])]
+ ""
+ "%f0"
+ [(set_attr "type" "fp_sfunc")])
+
; Disable interrupts.
; Any earlier values read from CONFIG_REGNUM are out of date, since interrupts
; might have changed settings that we do not want to mess with.
; Modelled as a volatile write of CONFIG_REGNUM so no stale reads are
; moved across it.
(define_insn "gid"
  [(set (reg:SI CONFIG_REGNUM)
	(unspec_volatile:SI [(const_int 0)] UNSPECV_GID))]
  ""
  "gid"
  [(set_attr "type" "flow")])

; Enable interrupts.
; Present CONFIG_REGNUM here to make sure it is live before the
; actual uses in floating point insns / calls are inserted.
; FWIW, interrupts also do mind what is in the control register.
(define_insn "gie"
  [(unspec_volatile [(reg:SI CONFIG_REGNUM)] UNSPECV_GIE)]
  ""
  "gie"
  [(set_attr "type" "flow")])
+
; Floating point instructions require manipulating the control register.
; Manipulating the control register needs arithmetic.
; Arithmetic clobbers flags.
; The flags are in the status register, which also contains the alternate
; flag and the interrupt enable/disable bits.
; saving/restoring status and mixing up the order with gid/gie could
; lead to disaster.
; Usually, saving/restoring the status is unnecessary, and will be optimized
; away.  But when we really need it, we must make sure that we don't change
; anything but the flags.
; N.B.: We could make the constant easier to load by inverting it, but
; then we'd need to clobber the saved value - and that would make optimizing
; away unneeded saves/restores harder / less likely.
;; Move to/from the CC register.  Operand 2 is the mask ~0x10f0 used to
;; restore only the flag bits of the status register on the store path.
(define_expand "movcc"
  [(parallel [(set (match_operand:CC 0 "cc_move_operand" "")
		   (match_operand:CC 1 "cc_move_operand" ""))
	      (use (match_dup 2))
	      (clobber (match_scratch:SI 3 "=X, &r"))])]
  ""
  "operands[2] = gen_int_mode (~0x10f0, SImode);")

;; Reading CC is a single movfs; writing CC back merges the saved flag
;; bits into the current status via an eor/and/eor masking sequence so
;; that no other status bits (interrupt enable etc.) are disturbed.
(define_insn "*movcc_i"
  [(set (match_operand:CC 0 "cc_move_operand" "=r,Rcc")
	(match_operand:CC 1 "cc_move_operand" "Rcc, r"))
   (use (match_operand:SI 2 "nonmemory_operand" "X, r"))
   (clobber (match_scratch:SI 3 "=X, &r"))]
  ""
  "@
   movfs %0,status
   movfs %3,status\;eor %3,%3,%1\;and %3,%3,%2\;eor %3,%3,%1\;movts status,%3"
  [(set_attr "type" "flow")
   (set_attr "length" "20,4")])
+
+(define_insn_and_split "set_fp_mode"
+ [(set (reg:SI FP_NEAREST_REGNUM)
+ (match_operand:SI 0 "set_fp_mode_operand" "rCfm"))
+ (set (reg:SI FP_TRUNCATE_REGNUM) (match_dup 0))
+ (set (reg:SI FP_ANYFP_REGNUM)
+ (match_operand:SI 1 "set_fp_mode_operand" "rCfm"))
+ (use (match_operand:SI 2 "gpr_operand" "r"))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ ""
+ "#"
+ "reload_completed || !rtx_equal_p (operands[0], operands[1])"
+ [(const_int 0)]
+{
+ if (!reload_completed)
+ emit_note (NOTE_INSN_DELETED);
+ else
+ epiphany_expand_set_fp_mode (operands);
+ DONE;
+})
+
+
;; Boolean instructions.
;;
;; We don't define the DImode versions as expand_binop does a good enough job.
;; All logical ops set the integer flags, hence the CC clobber.

(define_insn "andsi3"
  [(set (match_operand:SI 0 "gpr_operand" "=r")
	(and:SI (match_operand:SI 1 "gpr_operand" "r")
		(match_operand:SI 2 "gpr_operand" "r")))
   (clobber (reg:CC CC_REGNUM))]
  ""
  "and %0,%1,%2")

(define_insn "iorsi3"
  [(set (match_operand:SI 0 "gpr_operand" "=r")
	(ior:SI (match_operand:SI 1 "gpr_operand" "r")
		(match_operand:SI 2 "gpr_operand" "r")))
   (clobber (reg:CC CC_REGNUM))]
  ""
  "orr %0,%1,%2")

(define_insn "xorsi3"
  [(set (match_operand:SI 0 "gpr_operand" "=r")
	(xor:SI (match_operand:SI 1 "gpr_operand" "r")
		(match_operand:SI 2 "gpr_operand" "r")))
   (clobber (reg:CC CC_REGNUM))]
  ""
  "eor %0,%1,%2")

;; One's complement: use the dedicated eor-with-minus-one-register form
;; when a register is reserved to hold -1, otherwise xor with a forced
;; -1 constant.
(define_expand "one_cmplsi2"
  [(set (match_operand:SI 0 "gpr_operand" "")
	(xor:SI (match_operand:SI 1 "gpr_operand" "")
		(match_dup 2)))]
  ""
{
  if (epiphany_m1reg >= 0)
    emit_insn (gen_one_cmplsi2_i (operands[0], operands[1]))
;
  else
    emit_insn (gen_xorsi3 (operands[0], operands[1],
			   force_reg (SImode, GEN_INT (-1))));
  DONE;
})

; Note that folding this pattern into the xorsi3 pattern would make combine
; less effective.
;; %- prints the register reserved to hold -1 (epiphany_m1reg).
(define_insn "one_cmplsi2_i"
  [(set (match_operand:SI 0 "gpr_operand" "=r")
	(not:SI (match_operand:SI 1 "gpr_operand" "r")))
   (clobber (reg:CC CC_REGNUM))]
  "epiphany_m1reg >= 0"
  "eor %0,%1,%-")
+
;; Shift instructions.
;; In principle we could support arbitrary symbolic values as shift constant
;; (truncating the value appropriately), but that would require a suitable
;; relocation and assembler & linker support.
;; The "_tst" variants additionally expose the flag-setting side effect
;; as a compare against zero, for use by combine.

(define_insn "ashrsi3"
  [(set (match_operand:SI 0 "gpr_operand" "=r,r")
	(ashiftrt:SI (match_operand:SI 1 "gpr_operand" "r,r")
		     (match_operand:SI 2 "arith_operand" "r,K")))
   (clobber (reg:CC CC_REGNUM))]
  ""
  "asr %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "shift")])

(define_insn "ashrsi3_tst"
  [(set (reg:CC CC_REGNUM)
	(compare:CC
	  (ashiftrt:SI (match_operand:SI 1 "gpr_operand" "r,r")
		       (match_operand:SI 2 "arith_operand" "r,K"))
	  (const_int 0)))
   (set (match_operand:SI 0 "gpr_operand" "=r,r")
	(ashiftrt:SI (match_dup 1) (match_dup 2)))]
  ""
  "asr %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "shift")])

;; Logical Shift Right
(define_insn "lshrsi3"
  [(set (match_operand:SI 0 "gpr_operand" "=r,r")
	(lshiftrt:SI (match_operand:SI 1 "gpr_operand" "r,r")
		     (match_operand:SI 2 "arith_operand" "r,K")))
   (clobber (reg:CC CC_REGNUM))]
  ""
  "lsr %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "shift")])

(define_insn "lshrsi3_tst"
  [(set (reg:CC CC_REGNUM)
	(compare:CC
	  (lshiftrt:SI (match_operand:SI 1 "gpr_operand" "r,r")
		       (match_operand:SI 2 "arith_operand" "r,K"))
	  (const_int 0)))
   (set (match_operand:SI 0 "gpr_operand" "=r,r")
	(lshiftrt:SI (match_dup 1) (match_dup 2)))]
  ""
  "lsr %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "shift")])

;; Logical/Arithmetic Shift Left
(define_insn "ashlsi3"
  [(set (match_operand:SI 0 "gpr_operand" "=r,r")
	(ashift:SI (match_operand:SI 1 "gpr_operand" "r,r")
		   (match_operand:SI 2 "arith_operand" "r,K")))
   (clobber (reg:CC CC_REGNUM))]
  ""
  "lsl %0,%1,%2"
  [(set_attr "length" "4")
   (set_attr "type" "shift")])

;; Bit test: shift the tested bit (position operand 2) into bit 31 with
;; lsl by (31 - pos); the N flag of the resulting compare then reflects
;; the extracted bit (CC_N_NE mode).
(define_insn "*ashlsi_btst"
  [(set (reg:CC_N_NE CC_REGNUM)
	(compare:CC_N_NE
	  (zero_extract:SI (match_operand:SI 1 "gpr_operand" "r")
			   (const_int 1)
			   (match_operand 2 "const_int_operand" "K"))
	  (const_int 0)))
   (clobber (match_scratch:SI 0 "=r"))]
  ""
{
  rtx xop[3];

  xop[0] = operands[0];
  xop[1] = operands[1];
  xop[2] = GEN_INT (31-INTVAL (operands[2]));
  output_asm_insn ("lsl %0,%1,%2", xop);
  return "";
})
+
;; zero extensions

;; QI->SI: a byte load zero-extends for free; a register source is split
;; after reload into a shift-left/shift-right pair (no andi-with-0xff).
(define_insn_and_split "zero_extendqisi2"
  [(set (match_operand:SI 0 "register_operand" "=r,r")
	(zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))
   (clobber (reg:CC CC_REGNUM))]
  ""
  "@
   #
   ldrb %0,%1"
  "reload_completed
   ? true_regnum (operands[1]) >= 0
   : REG_P (operands[1]) && REGNO (operands[1]) < FIRST_PSEUDO_REGISTER"
  [(parallel [(set (match_dup 0) (ashift:SI (match_dup 2) (const_int 24)))
	      (clobber (reg:CC CC_REGNUM))])
   (parallel [(set (match_dup 0) (lshiftrt:SI (match_dup 0) (const_int 24)))
	      (clobber (reg:CC CC_REGNUM))])]
  "operands[2] = simplify_gen_subreg (SImode, operands[1], QImode, 0);")

;; HI->SI: in-register extension clears the high half with movt 0
;; (hence the "0" tied constraint); a halfword load zero-extends itself.
(define_insn "zero_extendhisi2"
  [(set (match_operand:SI 0 "register_operand" "=r,r")
	(zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,m")))]
  ""
  "@
   movt %0, 0
   ldrh %0,%c1")
+
+
+;; Compare instructions.
+
+(define_insn "cmpsi_cc_insn"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:SI 0 "add_reg_operand" "r,r")
+ (match_operand:SI 1 "arith_operand" "r,L")))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ ""
+ "sub %2,%0,%1"
+ [(set_attr "type" "compare")])
+
+(define_insn "sub_f"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:SI 1 "gpr_operand" "r,r")
+ (match_operand:SI 2 "arith_operand" "r,L")))
+ (set (match_operand:SI 0 "gpr_operand" "=r,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "sub %0,%1,%2"
+ [(set_attr "type" "compare")])
+
+(define_insn "*sub_f_add_imm"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:SI 1 "gpr_operand" "r")
+ (match_operand:SI 2 "arith_int_operand" "L")))
+ (set (match_operand:SI 0 "gpr_operand" "=r")
+ (plus:SI (match_dup 1) (match_operand:SI 3 "const_int_operand" "L")))]
+ "INTVAL (operands[2]) == -INTVAL (operands[3])"
+ "sub %0,%1,%2"
+ [(set_attr "type" "compare")])
+
+(define_expand "abssi2"
+ [(set (match_dup 2) (const_int 0))
+ (parallel [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_dup 2)
+ (match_operand:SI 1 "nonmemory_operand" "")))
+ (set (match_dup 3)
+ (minus:SI (match_dup 2) (match_dup 1)))])
+ (set (match_operand:SI 0 "gpr_operand" "=r")
+ (if_then_else:SI (gt:SI (reg:CC CC_REGNUM) (const_int 0))
+ (match_dup 3)
+ (match_dup 1)))]
+ "TARGET_CMOVE"
+ "operands[2] = gen_reg_rtx (SImode); operands[3] = gen_reg_rtx (SImode);")
+
+(define_insn "*add_c"
+ [(set (reg:CC_C_LTU CC_REGNUM)
+ (compare:CC_C_LTU
+ (plus:SI (match_operand:SI 1 "gpr_operand" "%r,r")
+ (match_operand:SI 2 "arith_operand" "r,L"))
+ (match_dup 1)))
+ (set (match_operand:SI 0 "gpr_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "add %0,%1,%2"
+ [(set_attr "type" "compare")])
+
+(define_insn "*add_c_rev"
+ [(set (reg:CC_C_LTU CC_REGNUM)
+ (compare:CC_C_LTU
+ (plus:SI (match_operand:SI 1 "gpr_operand" "%r,r")
+ (match_operand:SI 2 "arith_operand" "r,L"))
+ (match_dup 1)))
+ (set (match_operand:SI 0 "gpr_operand" "=r,r")
+ (plus:SI (match_dup 2) (match_dup 1)))]
+ ""
+ "add %0,%1,%2"
+ [(set_attr "type" "compare")])
+
+(define_insn "*sub_c"
+ [(set (reg:CC_C_GTU CC_REGNUM)
+ (compare:CC_C_GTU
+ (minus:SI (match_operand:SI 1 "gpr_operand" "r,r")
+ (match_operand:SI 2 "arith_operand" "r,L"))
+ (match_dup 1)))
+ (set (match_operand:SI 0 "gpr_operand" "=r,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "sub %0,%1,%2"
+ [(set_attr "type" "compare")])
+
+(define_insn "*sub_c_void"
+ [(set (reg:CC_C_GTU CC_REGNUM)
+ (compare:CC_C_GTU
+ (minus:SI (match_operand:SI 1 "gpr_operand" "r,r")
+ (match_operand:SI 2 "arith_operand" "r,L"))
+ (match_dup 1)))
+ (clobber (match_scratch:SI 0 "=r,r"))]
+ ""
+ "sub %0,%1,%2"
+ [(set_attr "type" "compare")])
+
; floating point comparisons

;; Hardware FP compare: an fsub into a scratch sets the CC_FP flags.
;; Only valid when not using the software comparison library.
(define_insn "*cmpsf_cc_insn"
  [(match_parallel 3 "float_operation"
     [(set (reg:CC_FP CCFP_REGNUM)
	   (compare:CC_FP (match_operand:SF 0 "gpr_operand" "r")
			  (match_operand:SF 1 "gpr_operand" "r")))
      (clobber (match_scratch:SF 2 "=r"))])]
  "!TARGET_SOFT_CMPSF"
  "fsub %2,%0,%1"
  [(set_attr "type" "fp")
   (set_attr "fp_mode" "round_unknown")])

;; ??? do we have to relax the operand0 predicate to immediate_operand
;; to allow the rtl loop optimizer to generate comparisons? OTOH
;; we want call_address_operand to enforce valid operands so that
;; combine won't do silly things, allowing instruction scheduling to do
;; a proper job.
;; Software FP comparisons: sfunc calls with args pinned to r0/r1 and
;; the result delivered in the integer flags; %f0 emits the call.
(define_insn "*cmpsf_eq"
  [(set (reg:CC_FP_EQ CC_REGNUM) (compare:CC_FP_EQ (reg:SF 0) (reg:SF 1)))
   (use (match_operand:SI 0 "call_address_operand" "Csy,r"))
   (clobber (reg:SI GPR_IP))
   (clobber (reg:SI GPR_LR))]
  "TARGET_SOFT_CMPSF"
  "%f0"
  [(set_attr "type" "sfunc")])

(define_insn "*cmpsf_gte"
  [(set (reg:CC_FP_GTE CC_REGNUM) (compare:CC_FP_GTE (reg:SF 0) (reg:SF 1)))
   (use (match_operand:SI 0 "call_address_operand" "Csy,r"))
   (clobber (reg:SI GPR_IP))
   (clobber (reg:SI GPR_LR))]
  "TARGET_SOFT_CMPSF"
  "%f0"
  [(set_attr "type" "sfunc")])

;; The ordered / unordered-or-equal helpers need an extra scratch (r16).
(define_insn "*cmpsf_ord"
  [(set (reg:CC_FP_ORD CC_REGNUM) (compare:CC_FP_ORD (reg:SF 0) (reg:SF 1)))
   (use (match_operand:SI 0 "call_address_operand" "Csy,r"))
   (clobber (reg:SI GPR_IP))
   (clobber (reg:SI GPR_16))
   (clobber (reg:SI GPR_LR))]
  "TARGET_SOFT_CMPSF"
  "%f0"
  [(set_attr "type" "sfunc")])

(define_insn "*cmpsf_uneq"
  [(set (reg:CC_FP_UNEQ CC_REGNUM) (compare:CC_FP_UNEQ (reg:SF 0) (reg:SF 1)))
   (use (match_operand:SI 0 "call_address_operand" "Csy,r"))
   (clobber (reg:SI GPR_IP))
   (clobber (reg:SI GPR_16))
   (clobber (reg:SI GPR_LR))]
  "TARGET_SOFT_CMPSF"
  "%f0"
  [(set_attr "type" "sfunc")])
+
+;; conditional moves
+
+(define_expand "mov<mode>cc"
+ [(set (match_operand:WMODE 0 "gpr_operand" "")
+ (if_then_else:WMODE (match_operand 1 "comparison_operator" "")
+ (match_operand:WMODE 2 "gpr_operand" "")
+ (match_operand:WMODE 3 "gpr_operand" "")))]
+ "TARGET_CMOVE"
+{
+ rtx cmp_op0 = XEXP (operands[1], 0);
+ rtx cmp_op1 = XEXP (operands[1], 1);
+ enum machine_mode cmp_in_mode;
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ cmp_in_mode = GET_MODE (cmp_op0);
+ if (cmp_in_mode == VOIDmode)
+ cmp_in_mode = GET_MODE (cmp_op0);
+ if (cmp_in_mode == VOIDmode)
+ cmp_in_mode = SImode;
+ /* If the operands are a better match when reversed, swap them now.
+ This allows combine to see the proper comparison codes. */
+ if (rtx_equal_p (operands[0], operands[2])
+ && !rtx_equal_p (operands[0], operands[3]))
+ {
+ rtx tmp = operands[2]; operands[2] = operands[3]; operands[3] = tmp;
+ code = (FLOAT_MODE_P (GET_MODE (cmp_op0))
+ ? reverse_condition_maybe_unordered (code)
+ : reverse_condition (code));
+ }
+
+ if (proper_comparison_operator (operands[1], VOIDmode))
+ operands[1] = gen_rtx_fmt_ee (code, cmp_in_mode, cmp_op0, cmp_op1);
+ else
+ {
+ if (!currently_expanding_to_rtl)
+ {
+ /* ??? It would seem safest to FAIL here, but that would defeat
+ the purpose of having an if-conversion pass; its logic currently
+ assumes that the backend should be safe to insert condition code
+ setting instructions, as the same condition codes were presumably
+ set by the if-conversion input code. */
+ }
+ /* What mode to give as first operand to gen_compare_reg here is
+ debatable. VOIDmode would be minimalist; telling gen_compare_reg
+ to use the mode of CC_REGNUM (or putting it on the comparison
+ operator afterwards) is also a logical choice. OTOH, by using
+ <MODE>mode, we have mode combine opportunities with flag setting
+ operations - if we get some. */
+ operands[1]
+ = gen_compare_reg (<MODE>mode, code, cmp_in_mode, cmp_op0, cmp_op1);
+ }
+})
+
+(define_insn "*mov<mode>cc_insn"
+ [(set (match_operand:WMODE 0 "gpr_operand" "=r")
+ (if_then_else:WMODE (match_operator 3 "proper_comparison_operator"
+ [(match_operand 4 "cc_operand") (const_int 0)])
+ (match_operand:WMODE 1 "gpr_operand" "r")
+ (match_operand:WMODE 2 "gpr_operand" "0")))]
+ "TARGET_CMOVE"
+ "mov%d3 %0,%1"
+ [(set_attr "type" "cmove")])
+
;; Peephole: a flag-clobbering computation into reg A, later copied into
;; reg B just to feed a conditional move of B, can compute directly into
;; B (as a subreg in the computation's mode) and drop the copy, provided
;; A dies and neither A nor B is disturbed by the insns in between
;; (matched as operand 2).
(define_peephole2
  [(parallel [(set (match_operand:WMODE 0 "gpr_operand" "")
		   (match_operand:WMODE 1 "" ""))
	      (clobber (match_operand 8 "cc_operand"))])
   (match_operand 2 "" "")
   (set (match_operand:WMODE2 3 "gpr_operand" "")
	(match_operand:WMODE2 9 "gpr_operand" ""))
   (set (match_dup 3)
	(if_then_else:WMODE2 (match_operator 5 "proper_comparison_operator"
			       [(match_operand 6 "cc_operand")
				(match_operand 7 "const0_operand")])
			     (match_operand:WMODE2 4 "nonmemory_operand" "")
			     (match_dup 3)))]
  "REGNO (operands[0]) == REGNO (operands[9])
   && peep2_reg_dead_p (3, operands[0])
   && !reg_set_p (operands[0], operands[2])
   && !reg_set_p (operands[3], operands[2])
   && !reg_overlap_mentioned_p (operands[3], operands[2])"
  [(parallel [(set (match_dup 10) (match_dup 1))
	      (clobber (match_dup 8))])
   (match_dup 2)
   (set (match_dup 3)
	(if_then_else:WMODE2 (match_dup 5) (match_dup 4) (match_dup 3)))]
{
  /* Retarget the computation at operand 3 (via a WMODE subreg) and
     rewrite any uses of the old registers in the in-between insns.  */
  operands[10] = simplify_gen_subreg (<WMODE:MODE>mode, operands[3],
				      <WMODE2:MODE>mode, 0);
  replace_rtx (operands[2], operands[9], operands[3]);
  replace_rtx (operands[2], operands[0], operands[10]));
  gcc_assert (!reg_overlap_mentioned_p (operands[0], operands[2]));
})

;; Peephole: a compare-and-compute parallel whose computed value is
;; copied and then overwritten by a conditional move on the same flags
;; can compute straight into the cmove destination, reversing the
;; condition so the surviving arm is the copied source (operand 4).
;; Requires a reversible CC mode and both the flags and the old result
;; register to be dead after the sequence.
(define_peephole2
  [(parallel [(set (match_operand 6 "cc_operand") (match_operand 2 "" ""))
	      (set (match_operand:WMODE 0 "gpr_operand" "")
		   (match_operand:WMODE 1 "" ""))])
   (set (match_operand:WMODE2 3 "gpr_operand" "")
	(match_operand:WMODE2 4 "gpr_operand"))
   (set (match_dup 3)
	(if_then_else:WMODE2 (match_operator 5 "proper_comparison_operator"
			       [(match_dup 6)
				(match_operand:WMODE 7 "const0_operand")])
			     (match_operand:WMODE2 8 "gpr_operand")
			     (match_dup 3)))]
  "REGNO (operands[0]) == REGNO (operands[8])
   && REVERSIBLE_CC_MODE (GET_MODE (operands[6]))
   && peep2_reg_dead_p (3, operands[6])
   && peep2_reg_dead_p (3, operands[0])
   && !reg_overlap_mentioned_p (operands[4], operands[3])"
  [(parallel [(set (match_dup 6) (match_dup 2))
	      (set (match_dup 9) (match_dup 1))])
   (set (match_dup 3)
	(if_then_else:WMODE2 (match_dup 5) (match_dup 4) (match_dup 3)))]
  "
{
  operands[5]
    = gen_rtx_fmt_ee (REVERSE_CONDITION (GET_CODE (operands[5]),
					 GET_MODE (operands[6])),
		      GET_MODE (operands[5]), operands[6], operands[7]);
  operands[9] = simplify_gen_subreg (<WMODE:MODE>mode, operands[3],
				     <WMODE2:MODE>mode, 0);
}")
+
;; These control RTL generation for conditional jump insns

;; To signal to can_compare_p that the cbranchs?4 patterns work,
;; they must allow const0_rtx for both comparison operands
;; Both expanders delegate to gen_compare_reg, which emits the actual
;; flag-setting insn (or sfunc call for soft SF compares) and returns
;; the comparison to branch on.
(define_expand "cbranchsi4"
  [(set (reg CC_REGNUM)
	(compare (match_operand:SI 1 "add_operand" "")
		 (match_operand:SI 2 "arith_operand" "")))
   (set (pc)
	(if_then_else
	  (match_operator 0 "ordered_comparison_operator" [(reg CC_REGNUM)
							   (const_int 0)])
	  (label_ref (match_operand 3 "" ""))
	  (pc)))]
  ""
{
  rtx cmp = gen_compare_reg (VOIDmode, GET_CODE (operands[0]), SImode,
			     operands[1], operands[2]);
  emit_jump_insn (gen_branch_insn (operands[3], cmp, XEXP (cmp, 0)));
  DONE;
})

(define_expand "cbranchsf4"
  [(set (reg CC_REGNUM)
	(compare (match_operand:SF 1 "arith_operand" "")
		 (match_operand:SF 2 "arith_operand" "")))
   (set (pc)
	(if_then_else
	  (match_operator 0 "comparison_operator" [(reg CC_REGNUM)
						   (const_int 0)])
	  (label_ref (match_operand 3 "" ""))
	  (pc)))]
  ""
{
  rtx cmp = gen_compare_reg (VOIDmode, GET_CODE (operands[0]), SFmode,
			     operands[1], operands[2]);
  emit_jump_insn (gen_branch_insn (operands[3], cmp, XEXP (cmp, 0)));
  DONE;
})
+
;; Now match both normal and inverted jump.

;; %d1 prints the condition suffix for operator 1; %D1 the inverted one.
(define_insn "branch_insn"
  [(set (pc)
	(if_then_else (match_operator 1 "proper_comparison_operator"
			[(match_operand 2 "cc_operand")
			 (const_int 0)])
		      (label_ref (match_operand 0 "" ""))
		      (pc)))]
  ""
  "b%d1 %l0"
  [(set_attr "type" "branch")])

(define_insn "*rev_branch_insn"
  [(set (pc)
	(if_then_else (match_operator 1 "proper_comparison_operator"
			[(reg CC_REGNUM) (const_int 0)])
		      (pc)
		      (label_ref (match_operand 0 "" ""))))]
  ""
  "b%D1 %l0"
  [(set_attr "type" "branch")])

;; Unconditional and other jump instructions.

(define_insn "jump"
  [(set (pc) (label_ref (match_operand 0 "" "")))]
  ""
  "b %l0"
  [(set_attr "type" "uncond_branch")])

(define_insn "indirect_jump"
  [(set (pc) (match_operand:SI 0 "gpr_operand" "r"))]
  ""
  "jr %0"
  [(set_attr "type" "uncond_branch")])

(define_expand "tablejump"
  [(parallel [(set (pc) (match_operand:SI 0 "gpr_operand" ""))
	      (use (label_ref (match_operand 1 "" "")))])]
  ""
{
  /* In PIC mode, the table entries are stored PC relative.
     Convert the relative address to an absolute address.  */
  if (flag_pic)
    {
      rtx op1 = gen_rtx_LABEL_REF (Pmode, operands[1]);

      operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
					 op1, NULL_RTX, 0, OPTAB_DIRECT);
    }
})

(define_insn "*tablejump_internal"
  [(set (pc) (match_operand:SI 0 "gpr_operand" "r"))
   (use (label_ref (match_operand 1 "" "")))]
  ""
  "jr %0;"
  [(set_attr "type" "uncond_branch")])

;; Variant for HImode (short) jump-table entries when optimizing for
;; size with -msmall16.
(define_insn "*tablejump_hi_internal"
  [(set (pc) (match_operand:HI 0 "gpr_operand" "r"))
   (use (label_ref (match_operand 1 "" "")))]
  "optimize_size && TARGET_SMALL16"
  "jr %0;"
  [(set_attr "type" "uncond_branch")])
+
+
+;; Calls.  If the callee's interrupt-disposition (uninterruptible or not)
+;; differs from the caller's, the call is bracketed with gid/gie (global
+;; interrupt disable/enable) instructions and emitted directly.
+(define_expand "call"
+  ;; operands[1] is stack_size_rtx
+  ;; operands[2] is next_arg_register
+  [(parallel [(call (match_operand:SI 0 "call_operand" "")
+		    (match_operand 1 "" ""))
+	      (clobber (reg:SI GPR_LR))])]
+  ""
+{
+  bool target_uninterruptible = epiphany_call_uninterruptible_p (operands[0]);
+
+  /* NOTE(review): this tests operands[1] (the stack size rtx) but then
+     legitimizes operands[0]; testing operands[0] looks like the intent,
+     as done in the call_value expander below -- confirm.  */
+  if (!call_operand (operands[1], VOIDmode))
+    operands[0]
+      = change_address (operands[0], VOIDmode,
+			copy_to_mode_reg (Pmode, XEXP (operands[0], 0)));
+  if (epiphany_uninterruptible_p (current_function_decl)
+      != target_uninterruptible)
+    {
+      /* Switch interrupt state for the duration of the call, then
+	 restore the caller's state.  */
+      emit_insn (target_uninterruptible ? gen_gid (): gen_gie ());
+      emit_call_insn
+	(gen_rtx_PARALLEL
+	  (VOIDmode,
+	   gen_rtvec (2, gen_rtx_CALL (VOIDmode, operands[0], operands[1]),
+		      gen_rtx_CLOBBER (VOIDmode,
+				       gen_rtx_REG (SImode, GPR_LR)))));
+      emit_insn (target_uninterruptible ? gen_gie (): gen_gid ());
+      DONE;
+    }
+})
+
+;; Call matcher; match_parallel allows the extra USEs/CLOBBERs added by
+;; mode switching (see float_operation in predicates.md).
+(define_insn "*call_i"
+  [(match_parallel 2 "float_operation"
+     [(call (mem:SI (match_operand:SI 0 "call_address_operand" "Csy,r"))
+	    (match_operand 1 "" ""))
+      (clobber (reg:SI GPR_LR))])]
+  ""
+  "%f0"
+  [(set_attr "type" "call")])
+
+;; Sibling (tail) call: like "call" but ends the function, so lr is not
+;; clobbered -- the pattern carries (return) instead.
+(define_expand "sibcall"
+  ;; operands[1] is stack_size_rtx
+  ;; operands[2] is next_arg_register
+  [(parallel [(call (match_operand:SI 0 "call_operand" "")
+		    (match_operand 1 "" ""))
+	      (return)])]
+  ""
+{
+  bool target_uninterruptible = epiphany_call_uninterruptible_p (operands[0]);
+
+  /* NOTE(review): same operands[1]-vs-operands[0] question as in the
+     "call" expander above.  */
+  if (!call_operand (operands[1], VOIDmode))
+    operands[0]
+      = change_address (operands[0], VOIDmode,
+			copy_to_mode_reg (Pmode, XEXP (operands[0], 0)));
+  if (epiphany_uninterruptible_p (current_function_decl)
+      != target_uninterruptible)
+    {
+      emit_insn (target_uninterruptible ? gen_gid (): gen_gie ());
+      emit_call_insn
+	(gen_rtx_PARALLEL
+	  (VOIDmode,
+	   gen_rtvec (2, gen_rtx_CALL (VOIDmode, operands[0], operands[1]),
+		      ret_rtx)));
+      emit_insn (target_uninterruptible ? gen_gie (): gen_gid ());
+      DONE;
+    }
+})
+
+;; Direct sibcalls use a plain branch; indirect ones need the callee
+;; address in a register that is not restored by the epilogue (Rsc).
+(define_insn "*sibcall_i"
+  [(call (mem:SI (match_operand:SI 0 "call_address_operand" "Csy,Rsc"))
+	 (match_operand 1 "" ""))
+   (return)]
+  ""
+  "@
+   b %0
+   jr %0"
+  [(set_attr "type" "call")])
+
+;; Value-returning variants of the call patterns above; operand 0
+;; receives the result, the callee moves to operand 1.
+(define_expand "call_value"
+  ;; operand 2 is stack_size_rtx
+  ;; operand 3 is next_arg_register
+  [(parallel [(set (match_operand 0 "gpr_operand" "=r")
+		   (call (match_operand:SI 1 "call_operand" "")
+			 (match_operand 2 "" "")))
+	      (clobber (reg:SI GPR_LR))])]
+  ""
+{
+  bool target_uninterruptible = epiphany_call_uninterruptible_p (operands[1]);
+
+  /* Force a non-call_operand callee address into a register.  */
+  if (!call_operand (operands[1], VOIDmode))
+    operands[1]
+      = change_address (operands[1], VOIDmode,
+			copy_to_mode_reg (Pmode, XEXP (operands[1], 0)));
+  if (epiphany_uninterruptible_p (current_function_decl)
+      != target_uninterruptible)
+    {
+      /* Bracket the call with gid/gie to switch interrupt state for
+	 the callee and restore it afterwards.  */
+      emit_insn (target_uninterruptible ? gen_gid (): gen_gie ());
+      emit_call_insn
+	(gen_rtx_PARALLEL
+	  (VOIDmode,
+	   gen_rtvec (2, gen_rtx_SET
+			   (VOIDmode, operands[0],
+			    gen_rtx_CALL (VOIDmode, operands[1], operands[2])),
+		      gen_rtx_CLOBBER (VOIDmode,
+				       gen_rtx_REG (SImode, GPR_LR)))));
+      emit_insn (target_uninterruptible ? gen_gie (): gen_gid ());
+      DONE;
+    }
+})
+
+;; Matcher; match_parallel tolerates mode-switching USEs/CLOBBERs.
+(define_insn "*call_value_i"
+  [(match_parallel 3 "float_operation"
+     [(set (match_operand 0 "gpr_operand" "=r,r")
+	   (call (mem:SI (match_operand:SI 1 "call_address_operand" "Csy,r"))
+		 (match_operand 2 "" "")))
+      (clobber (reg:SI GPR_LR))])]
+  ""
+  "%f1"
+  [(set_attr "type" "call")
+   (set_attr "length" "4")])
+
+;; Tail call returning a value; (return) instead of an lr clobber.
+(define_expand "sibcall_value"
+  ;; operand 2 is stack_size_rtx
+  ;; operand 3 is next_arg_register
+  [(parallel [(set (match_operand 0 "gpr_operand" "=r")
+		   (call (match_operand:SI 1 "call_operand" "")
+			 (match_operand 2 "" "")))
+	      (return)])]
+  ""
+{
+  bool target_uninterruptible = epiphany_call_uninterruptible_p (operands[1]);
+
+  if (!call_operand (operands[1], VOIDmode))
+    operands[1]
+      = change_address (operands[1], VOIDmode,
+			copy_to_mode_reg (Pmode, XEXP (operands[1], 0)));
+  if (epiphany_uninterruptible_p (current_function_decl)
+      != target_uninterruptible)
+    {
+      emit_insn (target_uninterruptible ? gen_gid (): gen_gie ());
+      emit_call_insn
+	(gen_rtx_PARALLEL
+	  (VOIDmode,
+	   gen_rtvec (2, gen_rtx_SET
+			   (VOIDmode, operands[0],
+			    gen_rtx_CALL (VOIDmode, operands[1], operands[2])),
+		      ret_rtx)));
+      emit_insn (target_uninterruptible ? gen_gie (): gen_gid ());
+      DONE;
+    }
+})
+
+;; Direct form branches; indirect form needs a sibcall-safe register.
+(define_insn "*sibcall_value_i"
+  [(set (match_operand 0 "gpr_operand" "=r,r")
+	(call (mem:SI (match_operand:SI 1 "call_address_operand" "Csy,Rsc"))
+	      (match_operand 2 "" "")))
+   (return)]
+  ""
+  "@
+   b %1
+   jr %1"
+  [(set_attr "type" "call")
+   (set_attr "length" "4")])
+
+;; Function prologue/epilogue are expanded entirely in C
+;; (epiphany.c); the RTL templates here are just placeholders.
+(define_expand "prologue"
+  [(pc)]
+  ""
+{
+  epiphany_expand_prologue ();
+  DONE;
+})
+
+(define_expand "epilogue"
+  [(pc)]
+  ""
+{
+  /* Argument 0: this is a normal (non-sibcall) epilogue.  */
+  epiphany_expand_epilogue (0);
+  DONE;
+})
+
+(define_expand "sibcall_epilogue"
+  [(pc)]
+  ""
+{
+  /* Argument 1: epilogue preceding a sibling call.  */
+  epiphany_expand_epilogue (1);
+  DONE;
+})
+
+; Since the demise of REG_N_SETS, it is no longer possible to find out
+; in the prologue / epilogue expanders how many times lr is set.
+; Using df_regs_ever_live_p to decide if lr needs saving means that
+; any explicit use of lr will cause it to be saved; hence we cannot
+; represent the lr use in return / sibcall instructions themselves, and
+; instead have to show it in EPILOGUE_USES.
+;; Plain return; only valid after reload, once the epilogue is final.
+(define_insn "return_i"
+  [(return)]
+  "reload_completed"
+  "rts"
+  [(set_attr "type" "uncond_branch")])
+
+;; Return from an interrupt handler; the unspec_volatile keeps it
+;; distinct from the ordinary return pattern.
+(define_insn "return_internal_interrupt"
+  [(return)
+   (unspec_volatile [(const_int 0)] 1)]
+  ""
+  "rti"
+  [(set_attr "type" "uncond_branch")])
+
+;; Stack pointer adjustments used by the prologue/epilogue expanders.
+;; Each carries a BLK memory clobber so that the scheduler cannot move
+;; stack memory accesses across the adjustment.
+(define_insn "stack_adjust_add"
+  [(set (reg:SI GPR_SP)
+	(plus:SI (reg:SI GPR_SP) (match_operand:SI 0 "arith_operand" "rL")))
+   (clobber (reg:CC CC_REGNUM))
+   (clobber (reg:SI STATUS_REGNUM))
+   (clobber (match_operand:BLK 1 "memory_operand" "=m"))]
+  "reload_completed"
+  "add sp,sp,%0")
+
+;; Restore sp from the frame pointer.
+(define_insn "stack_adjust_mov"
+  [(set (reg:SI GPR_SP) (reg:SI GPR_FP))
+   (clobber (match_operand:BLK 0 "memory_operand" "=m"))]
+  "reload_completed"
+  "mov sp,fp"
+  [(set_attr "type" "move")])
+
+;; Store-with-postmodify that also adjusts sp (register save).  Uses a
+;; doubleword store when the saved operand is wider than a word.
+(define_insn "stack_adjust_str"
+  [(set (match_operand 0 "stacktop_operand" "=m")
+	(match_operand 1 "any_gpr_operand" "r"))
+   (set (reg:SI GPR_SP)
+	(plus:SI (reg:SI GPR_SP) (match_operand:SI 2 "nonmemory_operand" "rn")))
+   (clobber (match_operand:BLK 3 "memory_operand" "=m"))]
+  "reload_completed"
+{
+  return (GET_MODE_SIZE (GET_MODE (operands[0])) <= 4
+	  ? \"str %1,%0,%C2\" : \"strd %1,%0,%X2\");
+}
+  [(set_attr "type" "store")])
+
+;; Load-with-postmodify that also adjusts sp (register restore).
+(define_insn "stack_adjust_ldr"
+  [(set (match_operand:SI 0 "gpr_operand" "=r")
+	(match_operand:SI 1 "stacktop_operand" "m"))
+   (set (reg:SI GPR_SP)
+	(plus:SI (reg:SI GPR_SP) (match_operand:SI 2 "nonmemory_operand" "rn")))
+   (clobber (match_operand:BLK 3 "memory_operand" "=m"))]
+  "reload_completed"
+  "ldr %0,%1,%C2"
+  [(set_attr "type" "load")])
+
+;; Define some fake vector operations so that the vectorizer is happy to use
+;; 64 bit loads/stores.
+;; Sign-extend the low two HImode lanes of a V4HI into a V2SI.
+;; Works on SImode subregs: lane 0 = (word << 16) >> 16,
+;; lane 1 = word >> 16 (arithmetic shifts give the sign extension).
+(define_expand "vec_unpacks_lo_v4hi"
+  [(match_operand:V2SI 0 "gpr_operand")
+   (match_operand:V4HI 1 "gpr_operand")]
+  ""
+{
+  rtx in = simplify_gen_subreg (SImode, operands[1], V4HImode, 0);
+  rtx outl = simplify_gen_subreg (SImode, operands[0], V2SImode, 0);
+  rtx outh
+    = simplify_gen_subreg (SImode, operands[0], V2SImode, UNITS_PER_WORD);
+
+  /* outl is written before in is fully consumed; copy in if they
+     share a register.  */
+  if (reg_overlap_mentioned_p (outl, in))
+    in = copy_to_mode_reg (SImode, in);
+  emit_insn (gen_ashlsi3 (outl, in, GEN_INT (16)));
+  emit_insn (gen_ashrsi3 (outl, outl, GEN_INT (16)));
+  emit_insn (gen_ashrsi3 (outh, in, GEN_INT (16)));
+  DONE;
+})
+
+;; Same as above for the high two lanes (word 1 of the V4HI source).
+(define_expand "vec_unpacks_hi_v4hi"
+  [(match_operand:V2SI 0 "gpr_operand")
+   (match_operand:V4HI 1 "gpr_operand")]
+  ""
+{
+  rtx in = simplify_gen_subreg (SImode, operands[1], V4HImode, UNITS_PER_WORD);
+  rtx outl = simplify_gen_subreg (SImode, operands[0], V2SImode, 0);
+  rtx outh
+    = simplify_gen_subreg (SImode, operands[0], V2SImode, UNITS_PER_WORD);
+
+  if (reg_overlap_mentioned_p (outl, in))
+    in = copy_to_mode_reg (SImode, in);
+  emit_insn (gen_ashlsi3 (outl, in, GEN_INT (16)));
+  emit_insn (gen_ashrsi3 (outl, outl, GEN_INT (16)));
+  emit_insn (gen_ashrsi3 (outh, in, GEN_INT (16)));
+  DONE;
+})
+
+;; Code iterators and the operand-name attribute used by the vector
+;; patterns below.
+(define_code_iterator addsub [plus minus])
+
+(define_code_iterator alu_binop
+  [plus minus and ior xor])
+
+(define_code_attr insn_opname
+  [(plus "add") (minus "sub") (mult "mul") (div "div")
+   (and "and") (ior "ior") (xor "xor")])
+
+; You might think that this would work better as a define_expand, but
+; again lower_subreg pessimizes the code if it sees individual operations.
+; We need to keep inputs and outputs as register pairs if we want to
+; get sensible register allocation for double-word load and store operations.
+(define_insn_and_split "<insn_opname>v2si3"
+  [(set (match_operand:V2SI 0 "gpr_operand" "=r")
+	(alu_binop:V2SI (match_operand:V2SI 1 "gpr_operand" "r")
+			(match_operand:V2SI 2 "gpr_operand" "r")))
+   (clobber (reg:CC CC_REGNUM))]
+  ""
+  "#"
+  "reload_completed || (epiphany_vect_align == 4 && TARGET_SPLIT_VECMOVE_EARLY)"
+  [(const_int 0)]
+{
+  /* Split the V2SI operation into two SImode operations on the low
+     and high halves of each register pair.  */
+  rtx o0l, o0h, o1l, o1h, o2l, o2h;
+
+  o0l = simplify_gen_subreg (SImode, operands[0], V2SImode, 0);
+  o0h = simplify_gen_subreg (SImode, operands[0], V2SImode, UNITS_PER_WORD);
+  o1l = simplify_gen_subreg (SImode, operands[1], V2SImode, 0);
+  o1h = simplify_gen_subreg (SImode, operands[1], V2SImode, UNITS_PER_WORD);
+  o2l = simplify_gen_subreg (SImode, operands[2], V2SImode, 0);
+  o2h = simplify_gen_subreg (SImode, operands[2], V2SImode, UNITS_PER_WORD);
+  /* The low-half result may overwrite a high-half input still needed
+     by the second insn; copy such inputs first.  */
+  if (reg_overlap_mentioned_p (o0l, o1h))
+    o1h = copy_to_mode_reg (SImode, o1h);
+  if (reg_overlap_mentioned_p (o0l, o2h))
+    o2h = copy_to_mode_reg (SImode, o2h);
+  emit_insn (gen_<insn_opname>si3 (o0l, o1l, o2l));
+  emit_insn (gen_<insn_opname>si3 (o0h, o1h, o2h));
+  DONE;
+}
+  [(set_attr "length" "8")])
+
+;; V2SF add/sub: expander wraps the operation with the CC_FP clobber.
+(define_expand "<insn_opname>v2sf3"
+  [(parallel
+     [(set (match_operand:V2SF 0 "gpr_operand" "")
+	   (addsub:V2SF (match_operand:V2SF 1 "gpr_operand" "")
+			(match_operand:V2SF 2 "gpr_operand" "")))
+      (clobber (reg:CC_FP CCFP_REGNUM))])])
+
+;; Split into two SFmode operations.  Before reload we can emit fresh
+;; insns (and copy around overlaps); after reload we must use the split
+;; template, carrying over the mode-switching USEs from the parallel
+;; (operands 10/11).
+(define_insn_and_split "<insn_opname>v2sf3_i"
+  [(match_parallel 3 "float_operation"
+     [(set (match_operand:V2SF 0 "gpr_operand" "=r")
+	   (addsub:V2SF (match_operand:V2SF 1 "gpr_operand" "r")
+			(match_operand:V2SF 2 "gpr_operand" "r")))
+      (clobber (reg:CC_FP CCFP_REGNUM))])]
+  ""
+  "#"
+  "reload_completed || (epiphany_vect_align == 4 && TARGET_SPLIT_VECMOVE_EARLY)"
+  [(parallel
+     [(set (match_dup 4) (addsub:SF (match_dup 5) (match_dup 6)))
+      (clobber (reg:CC_FP CCFP_REGNUM))
+      (match_dup 10)
+      (match_dup 11)])
+   (parallel
+     [(set (match_dup 7) (addsub:SF (match_dup 8) (match_dup 9)))
+      (clobber (reg:CC_FP CCFP_REGNUM))
+      (match_dup 10)
+      (match_dup 11)])]
+{
+  /* Low halves: operands 4..6; high halves: operands 7..9.  */
+  operands[4] = simplify_gen_subreg (SFmode, operands[0], V2SFmode, 0);
+  operands[5] = simplify_gen_subreg (SFmode, operands[1], V2SFmode, 0);
+  operands[6] = simplify_gen_subreg (SFmode, operands[2], V2SFmode, 0);
+  operands[7]
+    = simplify_gen_subreg (SFmode, operands[0], V2SFmode, UNITS_PER_WORD);
+  operands[8]
+    = simplify_gen_subreg (SFmode, operands[1], V2SFmode, UNITS_PER_WORD);
+  operands[9]
+    = simplify_gen_subreg (SFmode, operands[2], V2SFmode, UNITS_PER_WORD);
+  if (!reload_completed)
+    {
+      /* Pre-reload: copy high-half inputs clobbered by the low-half
+	 result, then emit scalar insns directly.  */
+      if (reg_overlap_mentioned_p (operands[4], operands[8]))
+	operands[8] = copy_to_mode_reg (SFmode, operands[8]);
+      if (reg_overlap_mentioned_p (operands[4], operands[9]))
+	operands[9] = copy_to_mode_reg (SFmode, operands[9]);
+      emit_insn (gen_<insn_opname>sf3 (operands[4], operands[5], operands[6]));
+      emit_insn (gen_<insn_opname>sf3 (operands[7], operands[8], operands[9]));
+      DONE;
+    }
+  /* Post-reload: overlaps would be unfixable here.  */
+  gcc_assert (!reg_overlap_mentioned_p (operands[4], operands[8]));
+  gcc_assert (!reg_overlap_mentioned_p (operands[4], operands[9]));
+  /* The last two parallel elements are the mode-switching use/clobber.  */
+  operands[10] = XVECEXP (operands[3], 0, XVECLEN (operands[3], 0) - 2);
+  operands[11] = XVECEXP (operands[3], 0, XVECLEN (operands[3], 0) - 1);
+}
+  [(set_attr "length" "8")
+   (set_attr "type" "fp")])
+
+;; Doubleword vector multiply (V2SI or V2SF via the DWV2MODE iterator).
+(define_expand "mul<mode>3"
+  [(parallel
+     [(set (match_operand:DWV2MODE 0 "gpr_operand" "")
+	   (mult:DWV2MODE (match_operand:DWV2MODE 1 "gpr_operand" "")
+			  (match_operand:DWV2MODE 2 "gpr_operand" "")))
+      (clobber (reg:CC_FP CCFP_REGNUM))])])
+
+;; Same split scheme as <insn_opname>v2sf3_i: two part-mode multiplies,
+;; carrying the mode-switching use/clobber (operands 10/11) post-reload.
+(define_insn_and_split "mul<mode>3_i"
+  [(match_parallel 3 "float_operation"
+     [(set (match_operand:DWV2MODE 0 "gpr_operand" "=r")
+	   (mult:DWV2MODE (match_operand:DWV2MODE 1 "gpr_operand" "r")
+			  (match_operand:DWV2MODE 2 "gpr_operand" "r")))
+      (clobber (reg:CC_FP CCFP_REGNUM))])]
+  ""
+  "#"
+  "reload_completed || (epiphany_vect_align == 4 && TARGET_SPLIT_VECMOVE_EARLY)"
+  [(parallel
+     [(set (match_dup 4) (mult:<vmode_PART> (match_dup 5) (match_dup 6)))
+      (clobber (reg:CC_FP CCFP_REGNUM))
+      (match_dup 10)
+      (match_dup 11)])
+   (parallel
+     [(set (match_dup 7) (mult:<vmode_PART> (match_dup 8) (match_dup 9)))
+      (clobber (reg:CC_FP CCFP_REGNUM))
+      (match_dup 10)
+      (match_dup 11)])]
+{
+  /* Low halves: operands 4..6; high halves: operands 7..9.  */
+  operands[4]
+    = simplify_gen_subreg (<vmode_PART>mode, operands[0], <MODE>mode, 0);
+  operands[5]
+    = simplify_gen_subreg (<vmode_PART>mode, operands[1], <MODE>mode, 0);
+  operands[6]
+    = simplify_gen_subreg (<vmode_PART>mode, operands[2], <MODE>mode, 0);
+  operands[7] = simplify_gen_subreg (<vmode_PART>mode, operands[0],
+				     <MODE>mode, UNITS_PER_WORD);
+  operands[8] = simplify_gen_subreg (<vmode_PART>mode, operands[1],
+				     <MODE>mode, UNITS_PER_WORD);
+  operands[9] = simplify_gen_subreg (<vmode_PART>mode, operands[2],
+				     <MODE>mode, UNITS_PER_WORD);
+  if (!reload_completed)
+    {
+      /* Pre-reload: fix up overlaps and emit scalar multiplies.  */
+      if (reg_overlap_mentioned_p (operands[4], operands[8]))
+	operands[8] = copy_to_mode_reg (<vmode_PART>mode, operands[8]);
+      if (reg_overlap_mentioned_p (operands[4], operands[9]))
+	operands[9] = copy_to_mode_reg (<vmode_PART>mode, operands[9]);
+      emit_insn (gen_mul<vmode_part>3 (operands[4], operands[5], operands[6]));
+      emit_insn (gen_mul<vmode_part>3 (operands[7], operands[8], operands[9]));
+      DONE;
+    }
+  gcc_assert (!reg_overlap_mentioned_p (operands[4], operands[8]));
+  gcc_assert (!reg_overlap_mentioned_p (operands[4], operands[9]));
+  /* Mode-switching use/clobber from the matched parallel.  */
+  operands[10] = XVECEXP (operands[3], 0, XVECLEN (operands[3], 0) - 2);
+  operands[11] = XVECEXP (operands[3], 0, XVECLEN (operands[3], 0) - 1);
+}
+  [(set_attr "length" "8")
+   (set_attr "type" "<vmode_fp_type>")])
+
+;; Combiner pattern for vector fused multiply-add (a * b + c, with the
+;; accumulator tied to the output via the "0" constraint).  Allowed for
+;; float modes only with -ffused-madd; integer V2SI madd is always exact.
+(define_insn_and_split "*fmadd<mode>_combine"
+  [(match_parallel 4 "float_operation"
+     [(set (match_operand:DWV2MODE 0 "gpr_operand" "=r")
+	   (plus:DWV2MODE (mult:<MODE>
+			    (match_operand:<MODE> 1 "gpr_operand" "r")
+			    (match_operand:<MODE> 2 "gpr_operand" "r"))
+			  (match_operand:<MODE> 3 "gpr_operand" "0")))
+      (clobber (reg:CC_FP CCFP_REGNUM))])]
+  "TARGET_FUSED_MADD || <MODE>mode == V2SImode"
+  "#"
+  "reload_completed || (epiphany_vect_align == 4 && TARGET_SPLIT_VECMOVE_EARLY)"
+  [(parallel
+     [(set (match_dup 5)
+	   (plus:<vmode_PART> (mult:<vmode_PART> (match_dup 6) (match_dup 7))
+			      (match_dup 8)))
+      (clobber (reg:CC_FP CCFP_REGNUM))
+      (match_dup 13)
+      (match_dup 14)])
+   (parallel
+     [(set (match_dup 9)
+	   (plus:<vmode_PART> (mult:<vmode_PART> (match_dup 10) (match_dup 11))
+			      (match_dup 12)))
+      (clobber (reg:CC_FP CCFP_REGNUM))
+      (match_dup 13)
+      (match_dup 14)])]
+{
+  /* Low halves: operands 5..8; high halves: operands 9..12.  */
+  operands[5]
+    = simplify_gen_subreg (<vmode_PART>mode, operands[0], <MODE>mode, 0);
+  operands[6]
+    = simplify_gen_subreg (<vmode_PART>mode, operands[1], <MODE>mode, 0);
+  operands[7]
+    = simplify_gen_subreg (<vmode_PART>mode, operands[2], <MODE>mode, 0);
+  operands[8]
+    = simplify_gen_subreg (<vmode_PART>mode, operands[3], <MODE>mode, 0);
+  operands[9] = simplify_gen_subreg (<vmode_PART>mode, operands[0],
+				     <MODE>mode, UNITS_PER_WORD);
+  operands[10] = simplify_gen_subreg (<vmode_PART>mode, operands[1],
+				      <MODE>mode, UNITS_PER_WORD);
+  operands[11] = simplify_gen_subreg (<vmode_PART>mode, operands[2],
+				      <MODE>mode, UNITS_PER_WORD);
+  operands[12] = simplify_gen_subreg (<vmode_PART>mode, operands[3],
+				      <MODE>mode, UNITS_PER_WORD);
+  if (!reload_completed)
+    {
+      /* Pre-reload: copy any high-half input the low-half result would
+	 clobber, then emit the two scalar madd insns.  */
+      if (reg_overlap_mentioned_p (operands[5], operands[10]))
+	operands[10] = copy_to_mode_reg (<vmode_PART>mode, operands[10]);
+      if (reg_overlap_mentioned_p (operands[5], operands[11]))
+	operands[11] = copy_to_mode_reg (<vmode_PART>mode, operands[11]);
+      if (reg_overlap_mentioned_p (operands[5], operands[12]))
+	operands[12] = copy_to_mode_reg (<vmode_PART>mode, operands[12]);
+      emit_insn (gen_madd<vmode_part> (operands[5], operands[6], operands[7],
+				       operands[8]));
+      emit_insn (gen_madd<vmode_part> (operands[9], operands[10], operands[11],
+				       operands[12]));
+      DONE;
+    }
+  gcc_assert (!reg_overlap_mentioned_p (operands[5], operands[10]));
+  gcc_assert (!reg_overlap_mentioned_p (operands[5], operands[11]));
+  gcc_assert (!reg_overlap_mentioned_p (operands[5], operands[12]));
+  /* Mode-switching use/clobber from the matched parallel.  */
+  operands[13] = XVECEXP (operands[4], 0, XVECLEN (operands[4], 0) - 2);
+  operands[14] = XVECEXP (operands[4], 0, XVECLEN (operands[4], 0) - 1);
+}
+  [(set_attr "length" "8")
+   (set_attr "type" "<vmode_fp_type>")])
+
+;; Set one lane of a doubleword vector: just move into the word-sized
+;; subreg selected by the constant lane number (operand 2).
+(define_expand "vec_set<mode>"
+  [(match_operand:DWV2MODE 0 "register_operand")
+   (match_operand:<vmode_PART> 1 "register_operand")
+   (match_operand 2 "const_int_operand" "")]
+  ""
+{
+  operands[0]
+    = simplify_gen_subreg (<vmode_PART>mode, operands[0], <MODE>mode,
+			   UNITS_PER_WORD * INTVAL (operands[2]));
+  emit_move_insn (operands[0], operands[1]);
+  DONE;
+})
+
+;; No-op instruction.
+(define_insn "nop"
+  [(const_int 0)]
+  ""
+  "nop"
+  [(set_attr "type" "flow")])
diff --git a/gcc/config/epiphany/epiphany.opt b/gcc/config/epiphany/epiphany.opt
new file mode 100644
index 00000000000..374018260d4
--- /dev/null
+++ b/gcc/config/epiphany/epiphany.opt
@@ -0,0 +1,140 @@
+; Options for the Adapteva EPIPHANY port of the compiler
+;
+; Copyright (C) 2005, 2007, 2009, 2011 Free Software Foundation, Inc.
+; Contributed by Embecosm on behalf of Adapteva, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+; Each record below is: option name, properties line, --help text.
+; NOTE(review): several help strings start lowercase; GCC convention is
+; a capitalized sentence -- consider normalizing.
+mhalf-reg-file
+Target Mask(HALF_REG_FILE)
+Don't use any of r32..r63.
+
+mprefer-short-insn-regs
+Target Mask(PREFER_SHORT_INSN_REGS)
+preferentially allocate registers that allow short instruction generation.
+
+mbranch-cost=
+Target RejectNegative Joined UInteger Var(epiphany_branch_cost) Init(3)
+Set branch cost
+
+mcmove
+Target Mask(CMOVE)
+enable conditional move instruction usage.
+
+mnops=
+Target RejectNegative Joined UInteger Var(epiphany_n_nops) Init(0)
+set number of nops to emit before each insn pattern
+
+; Problems with using the flags from fsub for comparison are:
+; - Because of underflow (lack of subnormal numbers), different small numbers
+;   can compare as equal.
+; - the set of comparisons is limited, and reversing comparisons doesn't work
+;   in the presence of NaNs.
+; The latter problem might be tolerated with -ffinite-math-only , but nothing
+; in -funsafe-math-optimizations says different small numbers may be considered
+; equal.
+msoft-cmpsf
+Target Mask(SOFT_CMPSF)
+Use software floating point comparisons
+
+msplit-lohi
+Target Mask(SPLIT_LOHI)
+Enable split of 32 bit immediate loads into low / high part
+
+mpost-inc
+Target Mask(POST_INC)
+Enable use of POST_INC / POST_DEC
+
+mpost-modify
+Target Mask(POST_MODIFY)
+Enable use of POST_MODIFY
+
+mstack-offset=
+Target RejectNegative Joined UInteger Var(epiphany_stack_offset) Init(EPIPHANY_STACK_OFFSET)
+Set number of bytes on the stack preallocated for use by the callee.
+
+; The properties keyword is case-sensitive: it must be "Target", not
+; "target", or the option is not classified as a target option.
+mround-nearest
+Target Mask(ROUND_NEAREST)
+Assume round to nearest is selected for purposes of scheduling.
+
+mlong-calls
+Target Mask(LONG_CALLS)
+Generate call insns as indirect calls
+
+mshort-calls
+Target Mask(SHORT_CALLS)
+Generate call insns as direct calls
+
+msmall16
+Target Mask(SMALL16)
+Assume labels and symbols can be addressed using 16 bit absolute addresses.
+
+; NOTE(review): this record has no --help text line -- consider adding one.
+mfp-mode=
+Target RejectNegative Joined Var(epiphany_normal_fp_mode) Enum(attr_fp_mode) Init(FP_MODE_CALLER)
+
+; The values are from enum attr_fp_mode, but using that enum would bring
+; problems with enum forward declarations.
+Enum
+Name(attr_fp_mode) Type(int)
+
+EnumValue
+Enum(attr_fp_mode) String(caller) Value(FP_MODE_CALLER)
+
+EnumValue
+Enum(attr_fp_mode) String(round-nearest) Value(FP_MODE_ROUND_NEAREST)
+
+EnumValue
+Enum(attr_fp_mode) String(truncate) Value(FP_MODE_ROUND_TRUNC)
+
+EnumValue
+Enum(attr_fp_mode) String(int) Value(FP_MODE_INT)
+
+mvect-double
+Target Mask(VECT_DOUBLE)
+Vectorize for double-word operations.
+
+; NOTE(review): no --help text line for this record either.
+max-vect-align=
+Target RejectNegative Joined Var(epiphany_vect_align) Enum(vect_align) Init(8)
+
+Enum
+Name(vect_align) Type(int)
+
+EnumValue
+Enum(vect_align) String(4) Value(4)
+
+EnumValue
+Enum(vect_align) String(8) Value(8)
+
+msplit-vecmove-early
+Target Mask(SPLIT_VECMOVE_EARLY)
+Split unaligned 8 byte vector moves before post-modify address generation.
+
+; Joined option: the register name follows directly, e.g. -m1reg-r43.
+m1reg-
+Target RejectNegative Joined Var(epiphany_m1reg) Enum(m1reg) Init(-1)
+Set register to hold -1.
+
+Enum
+Name(m1reg) Type(int)
+
+EnumValue
+Enum(m1reg) String(none) Value(-1)
+
+EnumValue
+Enum(m1reg) String(r43) Value(43)
+
+EnumValue
+Enum(m1reg) String(r63) Value(63)
diff --git a/gcc/config/epiphany/epiphany_intrinsics.h b/gcc/config/epiphany/epiphany_intrinsics.h
new file mode 100644
index 00000000000..2c06b0c2504
--- /dev/null
+++ b/gcc/config/epiphany/epiphany_intrinsics.h
@@ -0,0 +1,27 @@
+/* Epiphany intrinsic functions
+ Copyright (C) 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Map the Epiphany fused multiply-add/subtract intrinsics onto the
+   generic single-precision FMA builtin.  Note the operand order: the
+   intrinsics take the accumulator first (a + b*c, a - b*c), while
+   __builtin_fmaf takes it last (fmaf (x, y, z) == x*y + z).  */
+#define __builtin_epiphany_fmadd(a, b, c) __builtin_fmaf (b, c, a)
+#define __builtin_epiphany_fmsub(a, b, c) __builtin_fmaf (-(b), c, a)
diff --git a/gcc/config/epiphany/mode-switch-use.c b/gcc/config/epiphany/mode-switch-use.c
new file mode 100644
index 00000000000..a7020f4393b
--- /dev/null
+++ b/gcc/config/epiphany/mode-switch-use.c
@@ -0,0 +1,91 @@
+/* Insert USEs in instructions that require mode switching.
+ This should probably be merged into mode-switching.c .
+ Copyright (C) 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "function.h"
+#include "emit-rtl.h"
+#include "tree-pass.h"
+#include "insn-attr.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "tm_p.h"
+#include "df.h"
+
+#ifndef TARGET_INSERT_MODE_SWITCH_USE
+#define TARGET_INSERT_MODE_SWITCH_USE NULL
+#endif
+
+/* Pass execute function: for every mode-switching entity, walk all
+   insns and let the target hook add USEs of the mode-control registers
+   to each insn that needs a specific mode.  This makes the mode
+   dependence visible to the dataflow framework.  Always returns 0
+   (no extra TODO flags).  */
+static unsigned int
+insert_uses (void)
+{
+  /* Number of modes per entity, as configured by the target.  */
+  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
+#define N_ENTITIES ARRAY_SIZE (num_modes)
+  int e;
+  void (*target_insert_mode_switch_use) (rtx insn, int, int)
+    = TARGET_INSERT_MODE_SWITCH_USE;
+
+  for (e = N_ENTITIES - 1; e >= 0; e--)
+    {
+      int no_mode = num_modes[e];
+      rtx insn;
+      int mode;
+
+      if (!OPTIMIZE_MODE_SWITCHING (e))
+	continue;
+      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+	{
+	  if (!INSN_P (insn))
+	    continue;
+	  /* no_mode means "this insn doesn't care"; skip it.  */
+	  mode = MODE_NEEDED (e, insn);
+	  if (mode == no_mode)
+	    continue;
+	  if (target_insert_mode_switch_use)
+	    {
+	      target_insert_mode_switch_use (insn, e, mode);
+	      /* The hook changed the insn pattern; refresh df info.  */
+	      df_insn_rescan (insn);
+	    }
+	}
+    }
+  return 0;
+}
+
+/* Pass descriptor; registered by the target (no gate, always runs
+   when scheduled in the pass list).  */
+struct rtl_opt_pass pass_mode_switch_use =
+{
+ {
+  RTL_PASS,
+  "mode_switch_use",			/* name */
+  NULL,					/* gate */
+  insert_uses,				/* execute */
+  NULL,					/* sub */
+  NULL,					/* next */
+  0,					/* static_pass_number */
+  TV_NONE,				/* tv_id */
+  0,					/* properties_required */
+  0,					/* properties_provided */
+  0,					/* properties_destroyed */
+  0,					/* todo_flags_start */
+  0,					/* todo_flags_finish */
+ }
+};
diff --git a/gcc/config/epiphany/predicates.md b/gcc/config/epiphany/predicates.md
new file mode 100644
index 00000000000..6e96af9fed4
--- /dev/null
+++ b/gcc/config/epiphany/predicates.md
@@ -0,0 +1,352 @@
+;; Predicate definitions for code generation on the EPIPHANY cpu.
+;; Copyright (C) 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
+;; 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
+;; Free Software Foundation, Inc.
+;; Contributed by Embecosm on behalf of Adapteva, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Returns true iff OP is a symbol reference that is a valid operand
+;; in a jump or call instruction.
+
+(define_predicate "symbolic_operand"
+  (match_code "symbol_ref,label_ref,const")
+{
+  /* Symbols are acceptable unless they need a long call, or are
+     non-local under PIC.  */
+  if (GET_CODE (op) == SYMBOL_REF)
+    return (!epiphany_is_long_call_p (op)
+	    && (!flag_pic || SYMBOL_REF_LOCAL_P (op)));
+  if (GET_CODE (op) == LABEL_REF)
+    return true;
+  if (GET_CODE (op) == CONST)
+    {
+      op = XEXP (op, 0);
+      if (GET_CODE (op) != PLUS || !symbolic_operand (XEXP (op, 0), mode))
+	return false;
+      /* The idea here is that a 'small' constant offset should be OK.
+	 What exactly is considered 'small' is a bit arbitrary.  */
+      return satisfies_constraint_L (XEXP (op, 1));
+    }
+  /* match_code above guarantees one of the three codes.  */
+  gcc_unreachable ();
+})
+
+;; Acceptable arguments to the call insn.
+
+(define_predicate "call_address_operand"
+  (ior (match_code "reg")
+       (match_operand 0 "symbolic_operand")))
+
+;; A call target: a MEM whose address is a valid call address.
+(define_predicate "call_operand"
+  (match_code "mem")
+{
+  op = XEXP (op, 0);
+  return call_address_operand (op, mode);
+})
+
+;; general purpose register.
+;; general purpose register.
+(define_predicate "gpr_operand"
+  (match_code "reg,subreg")
+{
+  int regno;
+
+  if (!register_operand (op, mode))
+    return 0;
+  if (GET_CODE (op) == SUBREG)
+    op = XEXP (op, 0);
+  regno = REGNO (op);
+  /* Accept pseudos (resolved later) and hard registers r0..r63.  */
+  return regno >= FIRST_PSEUDO_REGISTER || regno <= 63;
+})
+
+;; Mode-independent (special) wrapper around gpr_operand, for operands
+;; whose mode is checked elsewhere (e.g. stack_adjust_str).
+(define_special_predicate "any_gpr_operand"
+  (match_code "subreg,reg")
+{
+  return gpr_operand (op, mode);
+})
+
+;; register suitable for integer add / sub operations; besides general purpose
+;; registers we allow fake hard registers that are eliminated to a real
+;; hard register via an offset.
+(define_predicate "add_reg_operand"
+  (match_code "reg,subreg")
+{
+  int regno;
+
+  if (!register_operand (op, mode))
+    return 0;
+  if (GET_CODE (op) == SUBREG)
+    op = XEXP (op, 0);
+  regno = REGNO (op);
+  return (regno >= FIRST_PSEUDO_REGISTER || regno <= 63
+	  || regno == FRAME_POINTER_REGNUM
+	  || regno == ARG_POINTER_REGNUM);
+})
+
+;; Also allows suitable constants
+(define_predicate "add_operand"
+  (match_code "reg,subreg,const_int,symbol_ref,label_ref,const")
+{
+  if (GET_CODE (op) == REG || GET_CODE (op) == SUBREG)
+    return add_reg_operand (op, mode);
+  return satisfies_constraint_L (op);
+})
+
+;; Ordinary 3rd operand for arithmetic operations
+(define_predicate "arith_operand"
+  (match_code "reg,subreg,const_int,symbol_ref,label_ref,const")
+{
+  if (GET_CODE (op) == REG || GET_CODE (op) == SUBREG)
+    return register_operand (op, mode);
+  return satisfies_constraint_L (op);
+})
+
+;; Constant integer 3rd operand for arithmetic operations
+(define_predicate "arith_int_operand"
+  (match_code "const_int,symbol_ref,label_ref,const")
+{
+  return satisfies_constraint_L (op);
+})
+
+;; Return true if OP is an acceptable argument for a single word move source.
+
+(define_predicate "move_src_operand"
+  (match_code
+     "symbol_ref,label_ref,const,const_int,const_double,reg,subreg,mem,unspec")
+{
+  switch (GET_CODE (op))
+    {
+    case SYMBOL_REF :
+    case LABEL_REF :
+    case CONST :
+      return 1;
+    case CONST_INT :
+      return immediate_operand (op, mode);
+    case CONST_DOUBLE :
+      /* SImode constants should always fit into a CONST_INT.  Large
+	 unsigned 32-bit constants are represented as negative CONST_INTs.  */
+      gcc_assert (GET_MODE (op) != SImode);
+      /* We can handle 32-bit floating point constants.  */
+      if (mode == SFmode)
+	return GET_MODE (op) == SFmode;
+      return 0;
+    case REG :
+      /* The frame pointer is only valid in address arithmetic contexts,
+	 not as a move source.  */
+      return op != frame_pointer_rtx && register_operand (op, mode);
+    case SUBREG :
+      /* (subreg (mem ...) ...) can occur here if the inner part was once a
+	 pseudo-reg and is now a stack slot.  */
+      if (GET_CODE (SUBREG_REG (op)) == MEM)
+	return address_operand (XEXP (SUBREG_REG (op), 0), mode);
+      else
+	return register_operand (op, mode);
+    case MEM :
+      return address_operand (XEXP (op, 0), mode);
+    case UNSPEC:
+      return satisfies_constraint_Sra (op);
+    default :
+      return 0;
+    }
+})
+
+;; Return true if OP is an acceptable argument for a double word move source.
+
+(define_predicate "move_double_src_operand"
+  (match_code "reg,subreg,mem,const_int,const_double,const_vector")
+{
+  return general_operand (op, mode);
+})
+
+;; Return true if OP is an acceptable argument for a move destination.
+
+(define_predicate "move_dest_operand"
+  (match_code "reg,subreg,mem")
+{
+  switch (GET_CODE (op))
+    {
+    case REG :
+      return register_operand (op, mode);
+    case SUBREG :
+      /* (subreg (mem ...) ...) can occur here if the inner part was once a
+	 pseudo-reg and is now a stack slot.  */
+      if (GET_CODE (SUBREG_REG (op)) == MEM)
+	{
+	  return address_operand (XEXP (SUBREG_REG (op), 0), mode);
+	}
+      else
+	{
+	  return register_operand (op, mode);
+	}
+    case MEM :
+      return address_operand (XEXP (op, 0), mode);
+    default :
+      return 0;
+    }
+})
+
+;; A MEM located exactly at the stack pointer, of the requested mode
+;; (special predicate: mode check done here, not by the framework).
+(define_special_predicate "stacktop_operand"
+  (match_code "mem")
+{
+  if (mode != VOIDmode && GET_MODE (op) != mode)
+    return false;
+  return rtx_equal_p (XEXP (op, 0), stack_pointer_rtx);
+})
+
+;; Return 1 if OP is a comparison operator valid for the mode of CC.
+;; This allows the use of MATCH_OPERATOR to recognize all the branch insns.
+;;
+;; Some insns only set a few bits in the condition code. So only allow those
+;; comparisons that use the bits that are valid.
+
+(define_predicate "proper_comparison_operator"
+  (match_code "eq, ne, le, lt, ge, gt, leu, ltu, geu, gtu, unordered, ordered, uneq, unge, ungt, unle, unlt, ltgt")
+{
+  enum rtx_code code = GET_CODE (op);
+  rtx cc = XEXP (op, 0);
+
+  /* combine can try strange things.  */
+  if (!REG_P (cc))
+    return 0;
+  /* Each CC mode records which flag bits an insn actually sets; only
+     allow the comparisons those bits can answer.  */
+  switch (GET_MODE (cc))
+    {
+    case CC_Zmode:
+    case CC_N_NEmode:
+    case CC_FP_EQmode:
+      return REGNO (cc) == CC_REGNUM && (code == EQ || code == NE);
+    case CC_C_LTUmode:
+      return REGNO (cc) == CC_REGNUM && (code == LTU || code == GEU);
+    case CC_C_GTUmode:
+      return REGNO (cc) == CC_REGNUM && (code == GTU || code == LEU);
+    case CC_FPmode:
+      return (REGNO (cc) == CCFP_REGNUM
+	      && (code == EQ || code == NE || code == LT || code == LE));
+    case CC_FP_GTEmode:
+      return (REGNO (cc) == CC_REGNUM
+	      && (code == EQ || code == NE || code == GT || code == GE
+		  || code == UNLE || code == UNLT));
+    case CC_FP_ORDmode:
+      return REGNO (cc) == CC_REGNUM && (code == ORDERED || code == UNORDERED);
+    case CC_FP_UNEQmode:
+      return REGNO (cc) == CC_REGNUM && (code == UNEQ || code == LTGT);
+    case CCmode:
+      return REGNO (cc) == CC_REGNUM;
+    /* From combiner.  */
+    case QImode: case SImode: case SFmode: case HImode:
+    /* From cse.c:dead_libcall_p.  */
+    case DFmode:
+      return 0;
+    default:
+      gcc_unreachable ();
+    }
+})
+
+;; Either of the two condition code registers.
+(define_predicate "cc_operand"
+  (and (match_code "reg")
+       (match_test "REGNO (op) == CC_REGNUM || REGNO (op) == CCFP_REGNUM")))
+
+;; Integer or floating-point zero of the operand's mode.
+(define_predicate "const0_operand"
+  (match_code "const_int, const_double")
+{
+  if (mode == VOIDmode)
+    mode = GET_MODE (op);
+  return op == CONST0_RTX (mode);
+})
+
+;; Floating-point constant 1.0 in the requested mode.
+(define_predicate "const_float_1_operand"
+  (match_code "const_double")
+{
+  return op == CONST1_RTX (mode);
+})
+
+;; Valid source/destination for a move involving the CC register.
+(define_predicate "cc_move_operand"
+  (and (match_code "reg")
+       (ior (match_test "REGNO (op) == CC_REGNUM")
+	    (match_test "gpr_operand (op, mode)"))))
+
+;; Validate the PARALLEL shape of floating-point operation insns, both
+;; before and after the mode-switching pass has appended its USEs of the
+;; FP rounding-mode control registers.
+(define_predicate "float_operation"
+  (match_code "parallel")
+{
+  /* Most patterns start out with one SET and one CLOBBER, and gain a USE
+     or two of FP_NEAREST_REGNUM / FP_TRUNCATE_REGNUM / FP_ANYFP_REGNUM
+     after mode switching.  The longer patterns are
+     all beyond length 4, and before mode switching, end with a
+     CLOBBER of CCFP_REGNUM.  */
+  int count = XVECLEN (op, 0);
+  bool inserted = MACHINE_FUNCTION (cfun)->control_use_inserted;
+  int i;
+
+  /* SET + CLOBBER only: valid exactly before the USEs are inserted.  */
+  if (count == 2)
+    return !inserted;
+
+  /* combine / recog will pass any old garbage here before checking the
+     rest of the insn.  */
+  if (count <= 3)
+    return false;
+
+  /* Scan the longer patterns for the trailing CCFP clobber; i ends up
+     indexing the element after which the mode-switch USEs follow.  */
+  i = 1;
+  if (count > 4)
+    for (i = 4; i < count; i++)
+      {
+	rtx x = XVECEXP (op, 0, i);
+
+	if (GET_CODE (x) == CLOBBER)
+	  {
+	    if (!REG_P (XEXP (x, 0)))
+	      return false;
+	    if (REGNO (XEXP (x, 0)) == CCFP_REGNUM)
+	      {
+		/* Pattern ends at the CCFP clobber: pre-insertion form.  */
+		if (count == i + 1)
+		  return !inserted;
+		break;
+	      }
+	    /* Just an ordinary clobber, keep looking.  */
+	  }
+	else if (GET_CODE (x) == USE
+		 || (GET_CODE (x) == SET && i == 2))
+	  continue;
+	else
+	  return false;
+      }
+  /* Post-insertion form: exactly two control-register uses/clobbers
+     must follow.  */
+  if (count != i + 3 || !inserted)
+    return false;
+  for (i = i+1; i < count; i++)
+    {
+      rtx x = XVECEXP (op, 0, i);
+
+      if (GET_CODE (x) != USE && GET_CODE (x) != CLOBBER)
+	return false;
+      x = XEXP (x, 0);
+      if (!REG_P (x)
+	  || (REGNO (x) != FP_NEAREST_REGNUM
+	      && REGNO (x) != FP_TRUNCATE_REGNUM
+	      && REGNO (x) != FP_ANYFP_REGNUM))
+	return false;
+    }
+  return true;
+})
+
+;; Operand for the set_fp_mode insn: a GPR or a recognized FP-mode const.
+(define_predicate "set_fp_mode_operand"
+  (ior (match_test "gpr_operand (op, mode)")
+       (and (match_code "const")
+	    (match_test "satisfies_constraint_Cfm (op)"))))
+
+;; An automodify address form.
+(define_predicate "post_modify_address"
+  (match_code "post_modify,post_inc,post_dec"))
+
+;; A MEM whose address is an automodify form.
+(define_predicate "post_modify_operand"
+  (and (match_code "mem")
+       (match_test "post_modify_address (XEXP (op, 0), Pmode)")))
+
+(define_predicate "nonsymbolic_immediate_operand"
+  (ior (match_test "immediate_operand (op, mode)")
+       (match_code "const_vector"))) /* Is this specific enough? */
diff --git a/gcc/config/epiphany/resolve-sw-modes.c b/gcc/config/epiphany/resolve-sw-modes.c
new file mode 100644
index 00000000000..9564d752c4f
--- /dev/null
+++ b/gcc/config/epiphany/resolve-sw-modes.c
@@ -0,0 +1,182 @@
+/* Mode switching cleanup pass for the EPIPHANY cpu.
+ Copyright (C) 2000, 2011 Free Software Foundation, Inc.
+ Contributed by Embecosm on behalf of Adapteva, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "machmode.h"
+#include "tm.h"
+#include "hard-reg-set.h"
+#include "tm_p.h"
+#include "vec.h"
+#include "sbitmap.h"
+#include "basic-block.h"
+#include "df.h"
+#include "rtl.h"
+#include "insn-config.h"
+#include "insn-codes.h"
+#include "emit-rtl.h"
+#include "recog.h"
+#include "function.h"
+#include "insn-attr-common.h"
+#include "tree-pass.h"
+
+/* Clean-up after mode switching:
+ Check for mode setting insns that have FP_MODE_ROUND_UNKNOWN.
+ If only one rounding mode is required, select that one.
+ Else we have to choose one to use in this mode setting insn and
+ insert new mode setting insns on the edges where the other mode
+ becomes unambiguous.  */
+
+/* Gate function for pass_resolve_sw_modes: the pass is only useful
+   (and only run) when optimizing.  */
+static bool
+gate_resolve_sw_modes (void)
+{
+  return optimize != 0;
+}
+
+/* Main pass body: clean up after mode switching.
+
+   If the function performed no mode set with an unknown rounding mode
+   (finalize_fp_sets), just propagate the chosen source into the third
+   SET of each set_fp_mode insn.  Otherwise, for each set_fp_mode insn
+   still carrying FP_MODE_ROUND_UNKNOWN, pick a concrete rounding mode
+   (from REG_UNUSED notes where only one mode is live, else the default
+   epiphany_normal_fp_rounding) and, where the rejected ("jilted") mode
+   is live in a successor block, insert a compensating mode-setting
+   insn on that edge.  Returns 0 (no extra TODO flags).  */
+static unsigned
+resolve_sw_modes (void)
+{
+  basic_block bb;
+  rtx insn, src;
+  VEC (basic_block, heap) *todo;
+  sbitmap pushed;
+  bool need_commit = false;
+  bool finalize_fp_sets = (MACHINE_FUNCTION (cfun)->unknown_mode_sets == 0);
+
+  /* Worklist of blocks whose successors may need edge insertions.
+     NOTE(review): VEC_quick_push below assumes at most last_basic_block
+     pushes in total; the PUSHED bitmap enforces at-most-once per block
+     for the worklist-driven pushes.  */
+  todo = VEC_alloc (basic_block, heap, last_basic_block);
+  pushed = sbitmap_alloc (last_basic_block);
+  sbitmap_zero (pushed);
+  if (!finalize_fp_sets)
+    {
+      /* We need up-to-date REG_UNUSED notes and liveness below.  */
+      df_note_add_problem ();
+      df_analyze ();
+    }
+  FOR_EACH_BB (bb)
+    FOR_BB_INSNS (bb, insn)
+      {
+	enum attr_fp_mode selected_mode;
+
+	if (!NONJUMP_INSN_P (insn)
+	    || recog_memoized (insn) != CODE_FOR_set_fp_mode)
+	  continue;
+	src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
+	if (finalize_fp_sets)
+	  {
+	    /* No unknown modes anywhere: just mirror the source into
+	       the third parallel SET and move on.  */
+	    SET_SRC (XVECEXP (PATTERN (insn), 0, 2)) = copy_rtx (src);
+	    if (REG_P (src))
+	      df_insn_rescan (insn);
+	    continue;
+	  }
+	if (REG_P (src)
+	    || XINT (XVECEXP (XEXP (src, 0), 0, 0), 0) != FP_MODE_ROUND_UNKNOWN)
+	  continue;
+	/* If only one rounding mode is actually used after this insn,
+	   the REG_UNUSED note for the other mode register tells us so.  */
+	if (find_regno_note (insn, REG_UNUSED, FP_TRUNCATE_REGNUM))
+	  selected_mode = FP_MODE_ROUND_NEAREST;
+	else if (find_regno_note (insn, REG_UNUSED, FP_NEAREST_REGNUM))
+	  selected_mode = FP_MODE_ROUND_TRUNC;
+	else
+	  {
+	    /* We could get more fancy in the selection of the mode by
+	       checking the total frequency of the affected edges.  */
+	    selected_mode = (enum attr_fp_mode) epiphany_normal_fp_rounding;
+
+	    VEC_quick_push (basic_block, todo, bb);
+	    SET_BIT (pushed, bb->index);
+	  }
+	XVECEXP (XEXP (src, 0), 0, 0) = GEN_INT (selected_mode);
+	SET_SRC (XVECEXP (PATTERN (insn), 0, 1)) = copy_rtx (src);
+	SET_SRC (XVECEXP (PATTERN (insn), 0, 2)) = copy_rtx (src);
+	df_insn_rescan (insn);
+      }
+  while (VEC_length (basic_block, todo))
+    {
+      basic_block bb = VEC_pop (basic_block, todo);
+      int selected_reg, jilted_reg;
+      enum attr_fp_mode jilted_mode;
+      edge e;
+      edge_iterator ei;
+
+      SET_BIT (pushed, bb->index);
+
+      if (epiphany_normal_fp_rounding == FP_MODE_ROUND_NEAREST)
+	{
+	  selected_reg = FP_NEAREST_REGNUM;
+	  jilted_reg = FP_TRUNCATE_REGNUM;
+	  jilted_mode = FP_MODE_ROUND_TRUNC;
+	}
+      else
+	{
+	  selected_reg = FP_TRUNCATE_REGNUM;
+	  jilted_reg = FP_NEAREST_REGNUM;
+	  jilted_mode = FP_MODE_ROUND_NEAREST;
+	}
+
+      FOR_EACH_EDGE (e, ei, bb->succs)
+	{
+	  basic_block succ = e->dest;
+	  rtx seq;
+
+	  if (!REGNO_REG_SET_P (DF_LIVE_IN (succ), jilted_reg))
+	    continue;
+	  if (REGNO_REG_SET_P (DF_LIVE_IN (succ), selected_reg))
+	    {
+	      /* Both modes live into SUCC: propagate the ambiguity
+		 and mark SUCC (not BB) so it is processed only once.  */
+	      if (TEST_BIT (pushed, succ->index))
+		continue;
+	      VEC_quick_push (basic_block, todo, succ);
+	      SET_BIT (pushed, succ->index);
+	      continue;
+	    }
+	  /* Only the jilted mode is live in SUCC: set it on this edge.  */
+	  start_sequence ();
+	  emit_set_fp_mode (EPIPHANY_MSW_ENTITY_ROUND_UNKNOWN,
+			    jilted_mode, NULL);
+	  seq = get_insns ();
+	  end_sequence ();
+	  need_commit = true;
+	  insert_insn_on_edge (seq, e);
+	}
+    }
+  VEC_free (basic_block, heap, todo);
+  sbitmap_free (pushed);
+  if (need_commit)
+    commit_edge_insertions ();
+  return 0;
+}
+
+/* Pass descriptor for the EPIPHANY mode-switching cleanup pass.
+   Positional initializer of the opt_pass fields; registered by the
+   backend as a machine-specific RTL pass, timed under TV_MODE_SWITCH.  */
+struct rtl_opt_pass pass_resolve_sw_modes =
+{
+ {
+  RTL_PASS,
+  "resolve_sw_modes",			/* name */
+  gate_resolve_sw_modes,		/* gate */
+  resolve_sw_modes,			/* execute */
+  NULL,					/* sub */
+  NULL,					/* next */
+  0,					/* static_pass_number */
+  TV_MODE_SWITCH,			/* tv_id */
+  0,					/* properties_required */
+  0,					/* properties_provided */
+  0,					/* properties_destroyed */
+  0,					/* todo_flags_start */
+  TODO_df_finish | TODO_verify_rtl_sharing |
+  0					/* todo_flags_finish */
+ }
+};
diff --git a/gcc/config/epiphany/t-epiphany b/gcc/config/epiphany/t-epiphany
new file mode 100644
index 00000000000..33db4acef13
--- /dev/null
+++ b/gcc/config/epiphany/t-epiphany
@@ -0,0 +1,32 @@
+# Copyright (C) 1997, 1998, 1999, 2001, 2002, 2003,
+# 2004, 2009, 2010, 2011 Free Software Foundation, Inc.
+# Contributed by Embecosm on behalf of Adapteva, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Build the EPIPHANY mode-switch-use pass.  The prerequisite list
+# mirrors the headers #included by mode-switch-use.c so the object is
+# rebuilt when any of them changes.
+mode-switch-use.o : $(srcdir)/config/epiphany/mode-switch-use.c \
+  $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TM_P_H) $(RTL_H) \
+  $(TREE_PASS_H) $(INSN_ATTR_H) $(EMIT_RTL_H) $(FUNCTION_H) $(RECOG_H) \
+  insn-config.h $(DF_H)
+	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $<
+
+# Build the EPIPHANY resolve-sw-modes cleanup pass; prerequisites track
+# the headers #included by resolve-sw-modes.c.
+resolve-sw-modes.o : $(srcdir)/config/epiphany/resolve-sw-modes.c \
+  $(CONFIG_H) $(SYSTEM_H) coretypes.h $(MACHMODE_H) $(TM_H) hard-reg-set.h \
+  $(TM_P_H) $(VEC_H) sbitmap.h $(BASIC_BLOCK_H) $(DF_H) $(RTL_H) \
+  insn-config.h insn-codes.h $(EMIT_RTL_H) $(RECOG_H) $(FUNCTION_H) \
+  insn-attr-common.h $(TREE_PASS_H)
+	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $<