path: root/gcc/config/sh/sh.c
Diffstat (limited to 'gcc/config/sh/sh.c')
-rw-r--r--  gcc/config/sh/sh.c | 1273
1 file changed, 930 insertions, 343 deletions
diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c
index e1c81c9c178..a266471b063 100644
--- a/gcc/config/sh/sh.c
+++ b/gcc/config/sh/sh.c
@@ -108,7 +108,7 @@ rtx sh_compare_op1;
/* Provides the class number of the smallest class containing
reg number. */
-int regno_reg_class[FIRST_PSEUDO_REGISTER] =
+enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
@@ -190,7 +190,7 @@ static rtx find_barrier PARAMS ((int, rtx, rtx));
static int noncall_uses_reg PARAMS ((rtx, rtx, rtx *));
static rtx gen_block_redirect PARAMS ((rtx, int, int));
static void sh_reorg PARAMS ((void));
-static void output_stack_adjust PARAMS ((int, rtx, int, rtx (*) (rtx)));
+static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
static rtx frame_insn PARAMS ((rtx));
static rtx push PARAMS ((int));
static void pop PARAMS ((int));
@@ -203,6 +203,7 @@ const struct attribute_spec sh_attribute_table[];
static tree sh_handle_interrupt_handler_attribute PARAMS ((tree *, tree, tree, int, bool *));
static tree sh_handle_sp_switch_attribute PARAMS ((tree *, tree, tree, int, bool *));
static tree sh_handle_trap_exit_attribute PARAMS ((tree *, tree, tree, int, bool *));
+static tree sh_handle_renesas_attribute PARAMS ((tree *, tree, tree, int, bool *));
static void sh_output_function_epilogue PARAMS ((FILE *, HOST_WIDE_INT));
static void sh_insert_attributes PARAMS ((tree, tree *));
static int sh_adjust_cost PARAMS ((rtx, rtx, rtx, int));
@@ -234,6 +235,19 @@ static int sh_address_cost PARAMS ((rtx));
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
+static int scavenge_reg (HARD_REG_SET *s);
+struct save_schedule_s;
+static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
+ struct save_schedule_s *, int);
+
+static bool sh_promote_prototypes PARAMS ((tree));
+static rtx sh_struct_value_rtx PARAMS ((tree, int));
+static bool sh_return_in_memory PARAMS ((tree, tree));
+static rtx sh_builtin_saveregs PARAMS ((void));
+static void sh_setup_incoming_varargs PARAMS ((CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int));
+static bool sh_strict_argument_naming PARAMS ((CUMULATIVE_ARGS *));
+static bool sh_pretend_outgoing_varargs_named PARAMS ((CUMULATIVE_ARGS *));
+
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
@@ -311,6 +325,27 @@ static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
#define TARGET_HAVE_TLS true
#endif
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
+#undef TARGET_PROMOTE_FUNCTION_ARGS
+#define TARGET_PROMOTE_FUNCTION_ARGS sh_promote_prototypes
+#undef TARGET_PROMOTE_FUNCTION_RETURN
+#define TARGET_PROMOTE_FUNCTION_RETURN sh_promote_prototypes
+
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY sh_return_in_memory
+
+#undef TARGET_EXPAND_BUILTIN_SAVEREGS
+#define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
+#undef TARGET_STRICT_ARGUMENT_NAMING
+#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
+#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
+#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
+
struct gcc_target targetm = TARGET_INITIALIZER;
/* Print the operand address in x to the stream. */
@@ -4558,17 +4593,16 @@ output_jump_label_table ()
static int extra_push;
-/* Adjust the stack by SIZE bytes. REG holds the rtl of the register
- to be adjusted, and TEMP, if nonnegative, holds the register number
- of a general register that we may clobber. */
+/* Adjust the stack by SIZE bytes. REG holds the rtl of the register to be
+ adjusted. If epilogue_p is zero, this is for a prologue; otherwise, it's
+ for an epilogue. If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET
+ of all the registers that are about to be restored, and hence dead. */
static void
-output_stack_adjust (size, reg, temp, emit_fn)
- int size;
- rtx reg;
- int temp;
- rtx (*emit_fn) PARAMS ((rtx));
+output_stack_adjust (int size, rtx reg, int epilogue_p,
+ HARD_REG_SET *live_regs_mask)
{
+ rtx (*emit_fn) (rtx) = epilogue_p ? &emit_insn : &frame_insn;
if (size)
{
HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
@@ -4591,10 +4625,43 @@ output_stack_adjust (size, reg, temp, emit_fn)
{
rtx const_reg;
rtx insn;
+ int temp = epilogue_p ? 7 : (TARGET_SH5 ? 0 : 1);
+ int i;
/* If TEMP is invalid, we could temporarily save a general
register to MACL. However, there is currently no need
to handle this case, so just abort when we see it. */
+ if (current_function_interrupt
+ || ! call_used_regs[temp] || fixed_regs[temp])
+ temp = -1;
+ if (temp < 0 && ! current_function_interrupt)
+ {
+ HARD_REG_SET temps;
+ COPY_HARD_REG_SET (temps, call_used_reg_set);
+ AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
+ if (epilogue_p)
+ {
+ for (i = 0; i < HARD_REGNO_NREGS (FIRST_RET_REG, DImode); i++)
+ CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
+ if (current_function_calls_eh_return)
+ {
+ CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
+ for (i = 0; i <= 3; i++)
+ CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
+ }
+ }
+ else
+ {
+ for (i = FIRST_PARM_REG;
+ i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
+ CLEAR_HARD_REG_BIT (temps, i);
+ if (current_function_needs_context)
+ CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
+ }
+ temp = scavenge_reg (&temps);
+ }
+ if (temp < 0 && live_regs_mask)
+ temp = scavenge_reg (live_regs_mask);
if (temp < 0)
abort ();
const_reg = gen_rtx_REG (GET_MODE (reg), temp);
@@ -4612,7 +4679,7 @@ output_stack_adjust (size, reg, temp, emit_fn)
emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
}
- if (emit_fn == frame_insn)
+ if (! epilogue_p)
REG_NOTES (insn)
= (gen_rtx_EXPR_LIST
(REG_FRAME_RELATED_EXPR,
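
For reference, the rewritten callers later in this patch pass an explicit epilogue flag and, in the epilogue, the set of registers about to be restored, so output_stack_adjust can scavenge one of them as its temporary. A condensed excerpt of those call sites (taken from the hunks below, not new code):

  /* prologue: epilogue_p == 0, no live-register set is available yet */
  output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx, 0, NULL);
  /* epilogue: epilogue_p == 1, registers being restored may serve as temporaries */
  output_stack_adjust (frame_size, stack_pointer_rtx, 1, &live_regs_mask);
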
@@ -4789,12 +4856,11 @@ calc_live_regs (live_regs_mask)
int reg;
int count;
int interrupt_handler;
- int pr_live;
+ int pr_live, has_call;
interrupt_handler = sh_cfun_interrupt_handler_p ();
- for (count = 0; 32 * count < FIRST_PSEUDO_REGISTER; count++)
- CLEAR_HARD_REG_SET (*live_regs_mask);
+ CLEAR_HARD_REG_SET (*live_regs_mask);
if (TARGET_SH4 && TARGET_FMOVD && interrupt_handler
&& regs_ever_live[FPSCR_REG])
target_flags &= ~FPU_SINGLE_BIT;
@@ -4813,7 +4879,9 @@ calc_live_regs (live_regs_mask)
the initial value can become the PR_MEDIA_REG hard register, as seen for
execute/20010122-1.c:test9. */
if (TARGET_SHMEDIA)
- pr_live = regs_ever_live[PR_MEDIA_REG];
+ /* ??? this function is called from initial_elimination_offset, hence we
+ can't use the result of sh_media_register_for_return here. */
+ pr_live = sh_pr_n_sets ();
else
{
rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
@@ -4821,6 +4889,10 @@ calc_live_regs (live_regs_mask)
? (GET_CODE (pr_initial) != REG
|| REGNO (pr_initial) != (PR_REG))
: regs_ever_live[PR_REG]);
+ /* For SHcompact, if not optimizing, we end up with a memory reference
+ using the return address pointer for __builtin_return_address even
+ though there is no actual need to put the PR register on the stack. */
+ pr_live |= regs_ever_live[RETURN_ADDRESS_POINTER_REGNUM];
}
/* Force PR to be live if the prologue has to call the SHmedia
argument decoder or register saver. */
@@ -4829,6 +4901,7 @@ calc_live_regs (live_regs_mask)
& ~ CALL_COOKIE_RET_TRAMP (1))
|| current_function_has_nonlocal_label))
pr_live = 1;
+ has_call = TARGET_SHMEDIA ? ! leaf_function_p () : pr_live;
for (count = 0, reg = FIRST_PSEUDO_REGISTER - 1; reg >= 0; reg--)
{
if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
@@ -4838,7 +4911,9 @@ calc_live_regs (live_regs_mask)
(regs_ever_live[reg]
|| (call_used_regs[reg]
&& (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG)
- && pr_live))
+ && has_call)
+ || (has_call && REGISTER_NATURAL_MODE (reg) == SImode
+ && (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
&& reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
&& reg != RETURN_ADDRESS_POINTER_REGNUM
&& reg != T_REG && reg != GBR_REG
@@ -4848,13 +4923,17 @@ calc_live_regs (live_regs_mask)
(TARGET_SHCOMPACT
&& flag_pic
&& current_function_args_info.call_cookie
- && reg == PIC_OFFSET_TABLE_REGNUM)
+ && reg == (int) PIC_OFFSET_TABLE_REGNUM)
|| (regs_ever_live[reg] && ! call_used_regs[reg])
|| (current_function_calls_eh_return
- && (reg == EH_RETURN_DATA_REGNO (0)
- || reg == EH_RETURN_DATA_REGNO (1)
- || reg == EH_RETURN_DATA_REGNO (2)
- || reg == EH_RETURN_DATA_REGNO (3)))))
+ && (reg == (int) EH_RETURN_DATA_REGNO (0)
+ || reg == (int) EH_RETURN_DATA_REGNO (1)
+ || reg == (int) EH_RETURN_DATA_REGNO (2)
+ || reg == (int) EH_RETURN_DATA_REGNO (3)))
+ || ((reg == MACL_REG || reg == MACH_REG)
+ && regs_ever_live[reg]
+ && sh_cfun_attr_renesas_p ())
+ ))
{
SET_HARD_REG_BIT (*live_regs_mask, reg);
count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
@@ -4891,6 +4970,19 @@ calc_live_regs (live_regs_mask)
SET_HARD_REG_BIT (*live_regs_mask, reg);
count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
}
+ /* If this is an interrupt handler, we don't have any call-clobbered
+ registers we can conveniently use for target register save/restore.
+ Make sure we save at least one general purpose register when we need
+ to save target registers. */
+ if (interrupt_handler
+ && hard_regs_intersect_p (live_regs_mask,
+ &reg_class_contents[TARGET_REGS])
+ && ! hard_regs_intersect_p (live_regs_mask,
+ &reg_class_contents[GENERAL_REGS]))
+ {
+ SET_HARD_REG_BIT (*live_regs_mask, R0_REG);
+ count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (R0_REG));
+ }
return count;
}
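
A hypothetical example of what the new has_call handling affects: in an interrupt handler that itself makes a call, the prologue must now also save the call-clobbered general purpose (and target) registers, since the callee may clobber them. The handler and log_event below are illustrative names, not part of this patch:

  extern void log_event (int code);

  void __attribute__ ((interrupt_handler))
  timer_irq (void)
  {
    log_event (1);  /* making a call forces the call-clobbered registers into the save set */
  }
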
@@ -4921,6 +5013,9 @@ sh_media_register_for_return ()
if (! current_function_is_leaf)
return -1;
+ if (lookup_attribute ("interrupt_handler",
+ DECL_ATTRIBUTES (current_function_decl)))
+ return -1;
tr0_used = flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM];
@@ -4931,6 +5026,130 @@ sh_media_register_for_return ()
return -1;
}
+/* The maximum registers we need to save are:
+ - 62 general purpose registers (r15 is stack pointer, r63 is zero)
+ - 32 floating point registers (for each pair, we save none,
+ one single precision value, or a double precision value).
+ - 8 target registers
+ - add 1 entry for a delimiter. */
+#define MAX_SAVED_REGS (62+32+8)
+
+typedef struct save_entry_s
+{
+ unsigned char reg;
+ unsigned char mode;
+ short offset;
+} save_entry;
+
+#define MAX_TEMPS 4
+
+/* There will be a delimiter entry with VOIDmode both at the start and the
+ end of a filled in schedule. The end delimiter has the offset of the
+ save with the smallest (i.e. most negative) offset. */
+typedef struct save_schedule_s
+{
+ save_entry entries[MAX_SAVED_REGS + 2];
+ int temps[MAX_TEMPS+1];
+} save_schedule;
+
+/* Fill in SCHEDULE according to LIVE_REGS_MASK. If RESTORE is nonzero,
+ use reverse order. Returns the last entry written to (not counting
+ the delimiter). OFFSET_BASE is a number to be added to all offset
+ entries. */
+
+static save_entry *
+sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
+ int offset_base)
+{
+ int align, i;
+ save_entry *entry = schedule->entries;
+ int tmpx = 0;
+ int offset;
+
+ if (! current_function_interrupt)
+ for (i = FIRST_GENERAL_REG; tmpx < MAX_TEMPS && i <= LAST_GENERAL_REG; i++)
+ if (call_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
+ && ! FUNCTION_ARG_REGNO_P (i)
+ && i != FIRST_RET_REG
+ && ! (current_function_needs_context && i == STATIC_CHAIN_REGNUM)
+ && ! (current_function_calls_eh_return
+ && (i == EH_RETURN_STACKADJ_REGNO
+ || ((unsigned)i <= EH_RETURN_DATA_REGNO (0)
+ && (unsigned)i >= EH_RETURN_DATA_REGNO (3)))))
+ schedule->temps[tmpx++] = i;
+ entry->reg = -1;
+ entry->mode = VOIDmode;
+ entry->offset = offset_base;
+ entry++;
+ /* We loop twice: first, we save 8-byte aligned registers in the
+ higher addresses, that are known to be aligned. Then, we
+ proceed to saving 32-bit registers that don't need 8-byte
+ alignment.
+ If this is an interrupt function, all registers that need saving
+ need to be saved in full. Moreover, we need to postpone saving
+ target registers till we have saved some general purpose registers
+ we can then use as scratch registers. */
+ offset = offset_base;
+ for (align = 1; align >= 0; align--)
+ {
+ for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
+ if (TEST_HARD_REG_BIT (*live_regs_mask, i))
+ {
+ enum machine_mode mode = REGISTER_NATURAL_MODE (i);
+ int reg = i;
+
+ if (current_function_interrupt)
+ {
+ if (TARGET_REGISTER_P (i))
+ continue;
+ if (GENERAL_REGISTER_P (i))
+ mode = DImode;
+ }
+ if (mode == SFmode && (i % 2) == 1
+ && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
+ && (TEST_HARD_REG_BIT (*live_regs_mask, (i ^ 1))))
+ {
+ mode = DFmode;
+ i--;
+ reg--;
+ }
+
+ /* If we're doing the aligned pass and this is not aligned,
+ or we're doing the unaligned pass and this is aligned,
+ skip it. */
+ if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT) == 0)
+ != align)
+ continue;
+
+ if (current_function_interrupt
+ && GENERAL_REGISTER_P (i)
+ && tmpx < MAX_TEMPS)
+ schedule->temps[tmpx++] = i;
+
+ offset -= GET_MODE_SIZE (mode);
+ entry->reg = i;
+ entry->mode = mode;
+ entry->offset = offset;
+ entry++;
+ }
+ if (align && current_function_interrupt)
+ for (i = LAST_TARGET_REG; i >= FIRST_TARGET_REG; i--)
+ if (TEST_HARD_REG_BIT (*live_regs_mask, i))
+ {
+ offset -= GET_MODE_SIZE (DImode);
+ entry->reg = i;
+ entry->mode = DImode;
+ entry->offset = offset;
+ entry++;
+ }
+ }
+ entry->reg = -1;
+ entry->mode = VOIDmode;
+ entry->offset = offset;
+ schedule->temps[tmpx] = -1;
+ return entry - 1;
+}
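
The schedule built above is delimited by VOIDmode entries at both ends, so the prologue walks it forward from entries[1] while the epilogue walks it backward from the entry returned by sh5_schedule_saves. A condensed excerpt of how the hunks below consume it (not new code):

  /* prologue */
  sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
  for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
    /* save entry->reg, in entry->mode, at sp + entry->offset */ ;

  /* epilogue */
  entry = sh5_schedule_saves (&live_regs_mask, &schedule, d_rounding);
  for (; entry->mode != VOIDmode; entry--)
    /* restore entry->reg from its scheduled slot */ ;
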
+
void
sh_expand_prologue ()
{
@@ -4945,7 +5164,7 @@ sh_expand_prologue ()
and partially on the stack, e.g. a large structure. */
output_stack_adjust (-current_function_pretend_args_size
- current_function_args_info.stack_regs * 8,
- stack_pointer_rtx, TARGET_SH5 ? 0 : 1, frame_insn);
+ stack_pointer_rtx, 0, NULL);
extra_push = 0;
@@ -4991,6 +5210,9 @@ sh_expand_prologue ()
rtx insn = emit_move_insn (gen_rtx_REG (DImode, tr),
gen_rtx_REG (DImode, PR_MEDIA_REG));
+ /* ??? We should suppress saving pr when we don't need it, but this
+ is tricky because of builtin_return_address. */
+
/* If this function only exits with sibcalls, this copy
will be flagged as dead. */
REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
@@ -5003,7 +5225,8 @@ sh_expand_prologue ()
if (current_function_stdarg)
{
/* This is not used by the SH2E calling convention */
- if (TARGET_SH1 && ! TARGET_SH2E && ! TARGET_SH5 && ! TARGET_HITACHI)
+ if (TARGET_SH1 && ! TARGET_SH2E && ! TARGET_SH5
+ && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
{
/* Push arg regs as if they'd been provided by caller in stack. */
for (i = 0; i < NPARM_REGS(SImode); i++)
@@ -5034,14 +5257,19 @@ sh_expand_prologue ()
if (TARGET_SH5)
{
- int i;
- int offset;
- int align;
- rtx r0 = gen_rtx_REG (Pmode, R0_REG);
+ int offset_base, offset;
+ rtx r0 = NULL_RTX;
int offset_in_r0 = -1;
int sp_in_r0 = 0;
int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
int total_size, save_size;
+ save_schedule schedule;
+ save_entry *entry;
+ int *tmp_pnt;
+
+ if (call_used_regs[R0_REG] && ! fixed_regs[R0_REG]
+ && ! current_function_interrupt)
+ r0 = gen_rtx_REG (Pmode, R0_REG);
/* D is the actual number of bytes that we need for saving registers,
however, in initial_elimination_offset we have committed to using
@@ -5067,146 +5295,153 @@ sh_expand_prologue ()
&& total_size <= 2044)))
d_rounding = total_size - save_size;
- offset = d + d_rounding;
+ offset_base = d + d_rounding;
output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx,
- 1, frame_insn);
-
- /* We loop twice: first, we save 8-byte aligned registers in the
- higher addresses, that are known to be aligned. Then, we
- proceed to saving 32-bit registers that don't need 8-byte
- alignment. */
- /* Note that if you change this code in a way that affects where
- the return register is saved, you have to update not only
- sh_expand_epilogue, but also sh_set_return_address. */
- for (align = 1; align >= 0; align--)
- for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
- if (TEST_HARD_REG_BIT (live_regs_mask, i))
- {
- enum machine_mode mode = REGISTER_NATURAL_MODE (i);
- int reg = i;
- rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
+ 0, NULL);
- if (mode == SFmode && (i % 2) == 1
- && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
- && (TEST_HARD_REG_BIT (live_regs_mask, (i ^ 1))))
- {
- mode = DFmode;
- i--;
- reg--;
- }
-
- /* If we're doing the aligned pass and this is not aligned,
- or we're doing the unaligned pass and this is aligned,
- skip it. */
- if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT)
- == 0) != align)
- continue;
+ sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
+ tmp_pnt = schedule.temps;
+ for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
+ {
+ enum machine_mode mode = entry->mode;
+ int reg = entry->reg;
+ rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
- offset -= GET_MODE_SIZE (mode);
+ offset = entry->offset;
- reg_rtx = gen_rtx_REG (mode, reg);
+ reg_rtx = gen_rtx_REG (mode, reg);
- mem_rtx = gen_rtx_MEM (mode,
- gen_rtx_PLUS (Pmode,
- stack_pointer_rtx,
- GEN_INT (offset)));
+ mem_rtx = gen_rtx_MEM (mode,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (offset)));
- GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_pre_dec);
+ GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_pre_dec);
- mem_rtx = NULL_RTX;
+ if (! r0)
+ abort ();
+ mem_rtx = NULL_RTX;
- try_pre_dec:
- do
- if (HAVE_PRE_DECREMENT
- && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
- || mem_rtx == NULL_RTX
- || i == PR_REG || SPECIAL_REGISTER_P (i)))
- {
- pre_dec = gen_rtx_MEM (mode,
- gen_rtx_PRE_DEC (Pmode, r0));
+ try_pre_dec:
+ do
+ if (HAVE_PRE_DECREMENT
+ && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
+ || mem_rtx == NULL_RTX
+ || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
+ {
+ pre_dec = gen_rtx_MEM (mode,
+ gen_rtx_PRE_DEC (Pmode, r0));
- GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (pre_dec, 0),
- pre_dec_ok);
+ GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (pre_dec, 0),
+ pre_dec_ok);
- pre_dec = NULL_RTX;
+ pre_dec = NULL_RTX;
- break;
+ break;
- pre_dec_ok:
- mem_rtx = NULL_RTX;
- offset += GET_MODE_SIZE (mode);
- }
- while (0);
+ pre_dec_ok:
+ mem_rtx = NULL_RTX;
+ offset += GET_MODE_SIZE (mode);
+ }
+ while (0);
- if (mem_rtx != NULL_RTX)
- goto addr_ok;
+ if (mem_rtx != NULL_RTX)
+ goto addr_ok;
- if (offset_in_r0 == -1)
- {
- emit_move_insn (r0, GEN_INT (offset));
- offset_in_r0 = offset;
- }
- else if (offset != offset_in_r0)
+ if (offset_in_r0 == -1)
+ {
+ emit_move_insn (r0, GEN_INT (offset));
+ offset_in_r0 = offset;
+ }
+ else if (offset != offset_in_r0)
+ {
+ emit_move_insn (r0,
+ gen_rtx_PLUS
+ (Pmode, r0,
+ GEN_INT (offset - offset_in_r0)));
+ offset_in_r0 += offset - offset_in_r0;
+ }
+
+ if (pre_dec != NULL_RTX)
+ {
+ if (! sp_in_r0)
{
emit_move_insn (r0,
gen_rtx_PLUS
- (Pmode, r0,
- GEN_INT (offset - offset_in_r0)));
- offset_in_r0 += offset - offset_in_r0;
+ (Pmode, r0, stack_pointer_rtx));
+ sp_in_r0 = 1;
}
-
- if (pre_dec != NULL_RTX)
- {
- if (! sp_in_r0)
- {
- emit_move_insn (r0,
- gen_rtx_PLUS
- (Pmode, r0, stack_pointer_rtx));
- sp_in_r0 = 1;
- }
- offset -= GET_MODE_SIZE (mode);
- offset_in_r0 -= GET_MODE_SIZE (mode);
+ offset -= GET_MODE_SIZE (mode);
+ offset_in_r0 -= GET_MODE_SIZE (mode);
- mem_rtx = pre_dec;
- }
- else if (sp_in_r0)
- mem_rtx = gen_rtx_MEM (mode, r0);
- else
- mem_rtx = gen_rtx_MEM (mode,
- gen_rtx_PLUS (Pmode,
- stack_pointer_rtx,
- r0));
-
- /* We must not use an r0-based address for target-branch
- registers or for special registers without pre-dec
- memory addresses, since we store their values in r0
- first. */
- if (TARGET_REGISTER_P (i)
- || ((i == PR_REG || SPECIAL_REGISTER_P (i))
- && mem_rtx != pre_dec))
- abort ();
-
- addr_ok:
- if (TARGET_REGISTER_P (i)
- || ((i == PR_REG || SPECIAL_REGISTER_P (i))
- && mem_rtx != pre_dec))
- {
- rtx r0mode = gen_rtx_REG (GET_MODE (reg_rtx), R0_REG);
+ mem_rtx = pre_dec;
+ }
+ else if (sp_in_r0)
+ mem_rtx = gen_rtx_MEM (mode, r0);
+ else
+ mem_rtx = gen_rtx_MEM (mode,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ r0));
+
+ /* We must not use an r0-based address for target-branch
+ registers or for special registers without pre-dec
+ memory addresses, since we store their values in r0
+ first. */
+ if (TARGET_REGISTER_P (reg)
+ || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
+ && mem_rtx != pre_dec))
+ abort ();
- emit_move_insn (r0mode, reg_rtx);
+ addr_ok:
+ if (TARGET_REGISTER_P (reg)
+ || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
+ && mem_rtx != pre_dec))
+ {
+ rtx tmp_reg = gen_rtx_REG (GET_MODE (reg_rtx), *tmp_pnt);
+
+ emit_move_insn (tmp_reg, reg_rtx);
+ if (REGNO (tmp_reg) == R0_REG)
+ {
offset_in_r0 = -1;
sp_in_r0 = 0;
-
- reg_rtx = r0mode;
+ if (refers_to_regno_p (R0_REG, R0_REG+1, mem_rtx, (rtx *) 0))
+ abort ();
}
- emit_move_insn (mem_rtx, reg_rtx);
+ if (*++tmp_pnt <= 0)
+ tmp_pnt = schedule.temps;
+
+ reg_rtx = tmp_reg;
}
+ {
+ rtx insn;
+
+ /* Mark as interesting for dwarf cfi generator */
+ insn = emit_move_insn (mem_rtx, reg_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
- if (offset != d_rounding)
+ if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
+ {
+ rtx reg_rtx = gen_rtx_REG (mode, reg);
+ rtx set, note_rtx;
+ rtx mem_rtx = gen_rtx_MEM (mode,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (offset)));
+
+ set = gen_rtx_SET (VOIDmode, mem_rtx, reg_rtx);
+ note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
+ REG_NOTES (insn));
+ REG_NOTES (insn) = note_rtx;
+ }
+ }
+ }
+
+ if (entry->offset != d_rounding)
abort ();
}
else
@@ -5258,7 +5493,7 @@ sh_expand_prologue ()
target_flags = save_flags;
output_stack_adjust (-rounded_frame_size (d) + d_rounding,
- stack_pointer_rtx, TARGET_SH5 ? 0 : 1, frame_insn);
+ stack_pointer_rtx, 0, NULL);
if (frame_pointer_needed)
frame_insn (GEN_MOV (frame_pointer_rtx, stack_pointer_rtx));
@@ -5318,7 +5553,7 @@ sh_expand_epilogue ()
if (frame_pointer_needed)
{
- output_stack_adjust (frame_size, frame_pointer_rtx, 7, emit_insn);
+ output_stack_adjust (frame_size, frame_pointer_rtx, 1, &live_regs_mask);
/* We must avoid moving the stack pointer adjustment past code
which reads from the local frame, else an interrupt could
@@ -5334,7 +5569,7 @@ sh_expand_epilogue ()
occur after the SP adjustment and clobber data in the local
frame. */
emit_insn (gen_blockage ());
- output_stack_adjust (frame_size, stack_pointer_rtx, 7, emit_insn);
+ output_stack_adjust (frame_size, stack_pointer_rtx, 1, &live_regs_mask);
}
if (SHMEDIA_REGS_STACK_ADJUST ())
@@ -5355,143 +5590,129 @@ sh_expand_epilogue ()
emit_insn (gen_toggle_sz ());
if (TARGET_SH5)
{
- int offset = d_rounding;
+ int offset_base, offset;
int offset_in_r0 = -1;
int sp_in_r0 = 0;
- int align;
rtx r0 = gen_rtx_REG (Pmode, R0_REG);
- int tmp_regno = R20_REG;
-
- /* We loop twice: first, we save 8-byte aligned registers in the
- higher addresses, that are known to be aligned. Then, we
- proceed to saving 32-bit registers that don't need 8-byte
- alignment. */
- for (align = 0; align <= 1; align++)
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (live_regs_mask, i))
- {
- enum machine_mode mode = REGISTER_NATURAL_MODE (i);
- int reg = i;
- rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;
+ save_schedule schedule;
+ save_entry *entry;
+ int *tmp_pnt;
+
+ entry = sh5_schedule_saves (&live_regs_mask, &schedule, d_rounding);
+ offset_base = -entry[1].offset + d_rounding;
+ tmp_pnt = schedule.temps;
+ for (; entry->mode != VOIDmode; entry--)
+ {
+ enum machine_mode mode = entry->mode;
+ int reg = entry->reg;
+ rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;
- if (mode == SFmode && (i % 2) == 0
- && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
- && (TEST_HARD_REG_BIT (live_regs_mask, (i ^ 1))))
- {
- mode = DFmode;
- i++;
- }
+ offset = offset_base + entry->offset;
+ reg_rtx = gen_rtx_REG (mode, reg);
- /* If we're doing the aligned pass and this is not aligned,
- or we're doing the unaligned pass and this is aligned,
- skip it. */
- if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT)
- == 0) != align)
- continue;
+ mem_rtx = gen_rtx_MEM (mode,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ GEN_INT (offset)));
- reg_rtx = gen_rtx_REG (mode, reg);
+ GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_post_inc);
- mem_rtx = gen_rtx_MEM (mode,
- gen_rtx_PLUS (Pmode,
- stack_pointer_rtx,
- GEN_INT (offset)));
+ mem_rtx = NULL_RTX;
- GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_post_inc);
-
- mem_rtx = NULL_RTX;
+ try_post_inc:
+ do
+ if (HAVE_POST_INCREMENT
+ && (offset == offset_in_r0
+ || (offset + GET_MODE_SIZE (mode) != d + d_rounding
+ && mem_rtx == NULL_RTX)
+ || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
+ {
+ post_inc = gen_rtx_MEM (mode,
+ gen_rtx_POST_INC (Pmode, r0));
- try_post_inc:
- do
- if (HAVE_POST_INCREMENT
- && (offset == offset_in_r0
- || (offset + GET_MODE_SIZE (mode) != d + d_rounding
- && mem_rtx == NULL_RTX)
- || i == PR_REG || SPECIAL_REGISTER_P (i)))
- {
- post_inc = gen_rtx_MEM (mode,
- gen_rtx_POST_INC (Pmode, r0));
+ GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (post_inc, 0),
+ post_inc_ok);
- GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (post_inc, 0),
- post_inc_ok);
+ post_inc = NULL_RTX;
- post_inc = NULL_RTX;
+ break;
+
+ post_inc_ok:
+ mem_rtx = NULL_RTX;
+ }
+ while (0);
+
+ if (mem_rtx != NULL_RTX)
+ goto addr_ok;
- break;
-
- post_inc_ok:
- mem_rtx = NULL_RTX;
- }
- while (0);
+ if (offset_in_r0 == -1)
+ {
+ emit_move_insn (r0, GEN_INT (offset));
+ offset_in_r0 = offset;
+ }
+ else if (offset != offset_in_r0)
+ {
+ emit_move_insn (r0,
+ gen_rtx_PLUS
+ (Pmode, r0,
+ GEN_INT (offset - offset_in_r0)));
+ offset_in_r0 += offset - offset_in_r0;
+ }
- if (mem_rtx != NULL_RTX)
- goto addr_ok;
-
- if (offset_in_r0 == -1)
- {
- emit_move_insn (r0, GEN_INT (offset));
- offset_in_r0 = offset;
- }
- else if (offset != offset_in_r0)
+ if (post_inc != NULL_RTX)
+ {
+ if (! sp_in_r0)
{
emit_move_insn (r0,
gen_rtx_PLUS
- (Pmode, r0,
- GEN_INT (offset - offset_in_r0)));
- offset_in_r0 += offset - offset_in_r0;
+ (Pmode, r0, stack_pointer_rtx));
+ sp_in_r0 = 1;
}
-
- if (post_inc != NULL_RTX)
- {
- if (! sp_in_r0)
- {
- emit_move_insn (r0,
- gen_rtx_PLUS
- (Pmode, r0, stack_pointer_rtx));
- sp_in_r0 = 1;
- }
-
- mem_rtx = post_inc;
+
+ mem_rtx = post_inc;
- offset_in_r0 += GET_MODE_SIZE (mode);
- }
- else if (sp_in_r0)
- mem_rtx = gen_rtx_MEM (mode, r0);
- else
- mem_rtx = gen_rtx_MEM (mode,
- gen_rtx_PLUS (Pmode,
- stack_pointer_rtx,
- r0));
-
- if ((i == PR_REG || SPECIAL_REGISTER_P (i))
- && mem_rtx != post_inc)
- abort ();
-
- addr_ok:
- if ((i == PR_REG || SPECIAL_REGISTER_P (i))
- && mem_rtx != post_inc)
- {
- insn = emit_move_insn (r0, mem_rtx);
- mem_rtx = r0;
- }
- else if (TARGET_REGISTER_P (i))
- {
- rtx tmp_reg = gen_rtx_REG (mode, tmp_regno);
-
- /* Give the scheduler a bit of freedom by using R20..R23
- in a round-robin fashion. Don't use R1 here because
- we want to use it for EH_RETURN_STACKADJ_RTX. */
- insn = emit_move_insn (tmp_reg, mem_rtx);
- mem_rtx = tmp_reg;
- if (++tmp_regno > R23_REG)
- tmp_regno = R20_REG;
- }
+ offset_in_r0 += GET_MODE_SIZE (mode);
+ }
+ else if (sp_in_r0)
+ mem_rtx = gen_rtx_MEM (mode, r0);
+ else
+ mem_rtx = gen_rtx_MEM (mode,
+ gen_rtx_PLUS (Pmode,
+ stack_pointer_rtx,
+ r0));
- insn = emit_move_insn (reg_rtx, mem_rtx);
+ if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
+ && mem_rtx != post_inc)
+ abort ();
- offset += GET_MODE_SIZE (mode);
+ addr_ok:
+ if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
+ && mem_rtx != post_inc)
+ {
+ insn = emit_move_insn (r0, mem_rtx);
+ mem_rtx = r0;
}
+ else if (TARGET_REGISTER_P (reg))
+ {
+ rtx tmp_reg = gen_rtx_REG (mode, *tmp_pnt);
+
+ /* Give the scheduler a bit of freedom by using up to
+ MAX_TEMPS registers in a round-robin fashion. */
+ insn = emit_move_insn (tmp_reg, mem_rtx);
+ mem_rtx = tmp_reg;
+ if (*++tmp_pnt < 0)
+ tmp_pnt = schedule.temps;
+ }
+
+ insn = emit_move_insn (reg_rtx, mem_rtx);
+ if (reg == PR_MEDIA_REG && sh_media_register_for_return () >= 0)
+ /* This is dead, unless we return with a sibcall. */
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
+ const0_rtx,
+ REG_NOTES (insn));
+ }
- if (offset != d + d_rounding)
+ if (entry->offset + offset_base != d + d_rounding)
abort ();
}
else /* ! TARGET_SH5 */
@@ -5521,7 +5742,7 @@ sh_expand_epilogue ()
output_stack_adjust (extra_push + current_function_pretend_args_size
+ save_size + d_rounding
+ current_function_args_info.stack_regs * 8,
- stack_pointer_rtx, 7, emit_insn);
+ stack_pointer_rtx, 1, NULL);
if (current_function_calls_eh_return)
emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
@@ -5566,7 +5787,6 @@ sh_set_return_address (ra, tmp)
{
HARD_REG_SET live_regs_mask;
int d;
- int d_rounding = 0;
int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
int pr_offset;
@@ -5598,56 +5818,26 @@ sh_set_return_address (ra, tmp)
if (TARGET_SH5)
{
- int i;
int offset;
- int align;
+ save_schedule schedule;
+ save_entry *entry;
- if (d % (STACK_BOUNDARY / BITS_PER_UNIT))
- d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
- - d % (STACK_BOUNDARY / BITS_PER_UNIT));
-
- offset = 0;
-
- /* We loop twice: first, we save 8-byte aligned registers in the
- higher addresses, that are known to be aligned. Then, we
- proceed to saving 32-bit registers that don't need 8-byte
- alignment. */
- for (align = 0; align <= 1; align++)
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (live_regs_mask, i))
- {
- enum machine_mode mode = REGISTER_NATURAL_MODE (i);
-
- if (mode == SFmode && (i % 2) == 0
- && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
- && (TEST_HARD_REG_BIT (live_regs_mask, (i ^ 1))))
- {
- mode = DFmode;
- i++;
- }
-
- /* If we're doing the aligned pass and this is not aligned,
- or we're doing the unaligned pass and this is aligned,
- skip it. */
- if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT)
- == 0) != align)
- continue;
-
- if (i == pr_reg)
- goto found;
-
- offset += GET_MODE_SIZE (mode);
- }
+ entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
+ offset = entry[1].offset;
+ for (; entry->mode != VOIDmode; entry--)
+ if (entry->reg == pr_reg)
+ goto found;
/* We can't find pr register. */
abort ();
found:
- pr_offset = (rounded_frame_size (d) - d_rounding + offset
+ offset = entry->offset - offset;
+ pr_offset = (rounded_frame_size (d) + offset
+ SHMEDIA_REGS_STACK_ADJUST ());
}
else
- pr_offset = rounded_frame_size (d) - d_rounding;
+ pr_offset = rounded_frame_size (d);
emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
emit_insn (GEN_ADD3 (tmp, tmp, frame_pointer_rtx));
@@ -5668,7 +5858,7 @@ sh_output_function_epilogue (file, size)
sp_switch = NULL_RTX;
}
-rtx
+static rtx
sh_builtin_saveregs ()
{
/* First unnamed integer register. */
@@ -5818,7 +6008,8 @@ sh_build_va_list ()
tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
tree record;
- if (TARGET_SH5 || (! TARGET_SH2E && ! TARGET_SH4) || TARGET_HITACHI)
+ if (TARGET_SH5 || (! TARGET_SH2E && ! TARGET_SH4)
+ || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
return ptr_type_node;
record = make_node (RECORD_TYPE);
@@ -5872,7 +6063,8 @@ sh_va_start (valist, nextarg)
return;
}
- if ((! TARGET_SH2E && ! TARGET_SH4) || TARGET_HITACHI)
+ if ((! TARGET_SH2E && ! TARGET_SH4)
+ || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
{
std_expand_builtin_va_start (valist, nextarg);
return;
@@ -5951,7 +6143,8 @@ sh_va_arg (valist, type)
if (pass_by_ref)
type = build_pointer_type (type);
- if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4) && ! TARGET_HITACHI)
+ if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4)
+ && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
{
tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
@@ -6135,6 +6328,343 @@ sh_va_arg (valist, type)
return result;
}
+static bool
+sh_promote_prototypes (type)
+ tree type;
+{
+ if (TARGET_HITACHI)
+ return 0;
+ if (! type)
+ return 1;
+ return ! sh_attr_renesas_p (type);
+}
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On SH the first args are normally in registers
+ and the rest are pushed. Any arg that starts within the first
+ NPARM_REGS words is at least partially passed in a register unless
+ its data type forbids. */
+
+
+rtx
+sh_function_arg (ca, mode, type, named)
+ CUMULATIVE_ARGS *ca;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ if (! TARGET_SH5 && mode == VOIDmode)
+ return GEN_INT (ca->renesas_abi ? 1 : 0);
+
+ if (! TARGET_SH5
+ && PASS_IN_REG_P (*ca, mode, type)
+ && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
+ {
+ int regno;
+
+ if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
+ && (! FUNCTION_ARG_SCmode_WART || (ROUND_REG (*ca, mode) & 1)))
+ {
+ rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SFmode,
+ BASE_ARG_REG (mode)
+ + (ROUND_REG (*ca, mode) ^ 1)),
+ const0_rtx);
+ rtx r2 = gen_rtx_EXPR_LIST(VOIDmode,
+ gen_rtx_REG (SFmode,
+ BASE_ARG_REG (mode)
+ + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
+ GEN_INT (4));
+ return gen_rtx_PARALLEL(SCmode, gen_rtvec(2, r1, r2));
+ }
+
+ /* If the alignment of a DF value causes an SF register to be
+ skipped, we will use that skipped register for the next SF
+ value. */
+ if ((TARGET_HITACHI || ca->renesas_abi)
+ && ca->free_single_fp_reg
+ && mode == SFmode)
+ return gen_rtx_REG (mode, ca->free_single_fp_reg);
+
+ regno = (BASE_ARG_REG (mode) + ROUND_REG (*ca, mode))
+ ^ (mode == SFmode && TARGET_SH4
+ && TARGET_LITTLE_ENDIAN != 0
+ && ! TARGET_HITACHI && ! ca->renesas_abi);
+ return gen_rtx_REG (mode, regno);
+
+ }
+
+ if (TARGET_SH5)
+ {
+ if (mode == VOIDmode && TARGET_SHCOMPACT)
+ return GEN_INT (ca->call_cookie);
+
+ /* The following test assumes unnamed arguments are promoted to
+ DFmode. */
+ if (mode == SFmode && ca->free_single_fp_reg)
+ return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode, ca->free_single_fp_reg);
+
+ if ((GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT)
+ && (named || ! ca->prototype_p)
+ && ca->arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (SFmode))
+ {
+ if (! ca->prototype_p && TARGET_SHMEDIA)
+ return SH5_PROTOTYPELESS_FLOAT_ARG (*ca, mode);
+
+ return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode,
+ FIRST_FP_PARM_REG
+ + ca->arg_count[(int) SH_ARG_FLOAT]);
+ }
+
+ if (ca->arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode)
+ && (! TARGET_SHCOMPACT
+ || (! SHCOMPACT_FORCE_ON_STACK (mode, type)
+ && ! SH5_WOULD_BE_PARTIAL_NREGS (*ca, mode,
+ type, named))))
+ {
+ return gen_rtx_REG (mode, (FIRST_PARM_REG
+ + ca->arg_count[(int) SH_ARG_INT]));
+ }
+
+ return 0;
+ }
+
+ return 0;
+}
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be
+ available.) */
+
+void
+sh_function_arg_advance (ca, mode, type, named)
+ CUMULATIVE_ARGS *ca;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ if (ca->force_mem)
+ ca->force_mem = 0;
+ else if (TARGET_SH5)
+ {
+ tree type2 = (ca->byref && type
+ ? TREE_TYPE (type)
+ : type);
+ enum machine_mode mode2 = (ca->byref && type
+ ? TYPE_MODE (type2)
+ : mode);
+ int dwords = ((ca->byref
+ ? ca->byref
+ : mode2 == BLKmode
+ ? int_size_in_bytes (type2)
+ : GET_MODE_SIZE (mode2)) + 7) / 8;
+ int numregs = MIN (dwords, NPARM_REGS (SImode)
+ - ca->arg_count[(int) SH_ARG_INT]);
+
+ if (numregs)
+ {
+ ca->arg_count[(int) SH_ARG_INT] += numregs;
+ if (TARGET_SHCOMPACT
+ && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
+ {
+ ca->call_cookie
+ |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
+ - numregs, 1);
+ /* N.B. We want this also for outgoing. */
+ ca->stack_regs += numregs;
+ }
+ else if (ca->byref)
+ {
+ if (! ca->outgoing)
+ ca->stack_regs += numregs;
+ ca->byref_regs += numregs;
+ ca->byref = 0;
+ do
+ ca->call_cookie
+ |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
+ - numregs, 2);
+ while (--numregs);
+ ca->call_cookie
+ |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
+ - 1, 1);
+ }
+ else if (dwords > numregs)
+ {
+ int pushregs = numregs;
+
+ if (TARGET_SHCOMPACT)
+ ca->stack_regs += numregs;
+ while (pushregs < NPARM_REGS (SImode) - 1
+ && (CALL_COOKIE_INT_REG_GET
+ (ca->call_cookie,
+ NPARM_REGS (SImode) - pushregs)
+ == 1))
+ {
+ ca->call_cookie
+ &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
+ - pushregs, 1);
+ pushregs++;
+ }
+ if (numregs == NPARM_REGS (SImode))
+ ca->call_cookie
+ |= CALL_COOKIE_INT_REG (0, 1)
+ | CALL_COOKIE_STACKSEQ (numregs - 1);
+ else
+ ca->call_cookie
+ |= CALL_COOKIE_STACKSEQ (numregs);
+ }
+ }
+ if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
+ && (named || ! ca->prototype_p))
+ {
+ if (mode2 == SFmode && ca->free_single_fp_reg)
+ ca->free_single_fp_reg = 0;
+ else if (ca->arg_count[(int) SH_ARG_FLOAT]
+ < NPARM_REGS (SFmode))
+ {
+ int numfpregs
+ = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
+ NPARM_REGS (SFmode)
+ - ca->arg_count[(int) SH_ARG_FLOAT]);
+
+ ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
+
+ if (TARGET_SHCOMPACT && ! ca->prototype_p)
+ {
+ if (ca->outgoing && numregs > 0)
+ do
+ {
+ ca->call_cookie
+ |= (CALL_COOKIE_INT_REG
+ (ca->arg_count[(int) SH_ARG_INT]
+ - numregs + ((numfpregs - 2) / 2),
+ 4 + (ca->arg_count[(int) SH_ARG_FLOAT]
+ - numfpregs) / 2));
+ }
+ while (numfpregs -= 2);
+ }
+ else if (mode2 == SFmode && (named)
+ && (ca->arg_count[(int) SH_ARG_FLOAT]
+ < NPARM_REGS (SFmode)))
+ ca->free_single_fp_reg
+ = FIRST_FP_PARM_REG - numfpregs
+ + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
+ }
+ }
+ return;
+ }
+
+ if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
+ {
+ /* Note that we've used the skipped register. */
+ if (mode == SFmode && ca->free_single_fp_reg)
+ {
+ ca->free_single_fp_reg = 0;
+ return;
+ }
+ /* When we have a DF after an SF, there's an SF register that gets
+ skipped in order to align the DF value. We note this skipped
+ register, because the next SF value will use it, and not the
+ SF that follows the DF. */
+ if (mode == DFmode
+ && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
+ {
+ ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
+ + BASE_ARG_REG (mode));
+ }
+ }
+
+ if (! (TARGET_SH4 || ca->renesas_abi)
+ || PASS_IN_REG_P (*ca, mode, type))
+ (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
+ = (ROUND_REG (*ca, mode)
+ + (mode == BLKmode
+ ? ROUND_ADVANCE (int_size_in_bytes (type))
+ : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
+}
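
sh_function_arg and sh_function_arg_advance are the bodies that the FUNCTION_ARG and FUNCTION_ARG_ADVANCE target macros are expected to expand to; the corresponding sh.h change is not part of this file's diff, so the wiring below is only a sketch of the assumed definitions:

  /* presumed sh.h glue -- not verified against the actual header patch */
  #define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
    sh_function_arg (&(CUM), (MODE), (TYPE), (NAMED))
  #define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
    sh_function_arg_advance (&(CUM), (MODE), (TYPE), (NAMED))
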
+
+/* If the structure value address is not passed in a register, define
+ `STRUCT_VALUE' as an expression returning an RTX for the place
+ where the address is passed. If it returns 0, the address is
+ passed as an "invisible" first argument. */
+/* The Renesas calling convention doesn't quite fit into this scheme since
+ the address is passed like an invisible argument, but one that is always
+ passed in memory. */
+static rtx
+sh_struct_value_rtx (fndecl, incoming)
+ tree fndecl;
+ int incoming ATTRIBUTE_UNUSED;
+{
+ if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
+ return 0;
+ return gen_rtx_REG (Pmode, 2);
+}
+
+static bool
+sh_return_in_memory (type, fndecl)
+ tree type;
+ tree fndecl;
+{
+ if (TARGET_SH5)
+ {
+ if (TYPE_MODE (type) == BLKmode)
+ return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)) > 8;
+ else
+ return GET_MODE_SIZE (TYPE_MODE (type)) > 8;
+ }
+ else
+ {
+ return (TYPE_MODE (type) == BLKmode
+ || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
+ && TREE_CODE (type) == RECORD_TYPE));
+ }
+}
+
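
A hypothetical illustration of what sh_return_in_memory changes on SH1..SH4: a single-word struct is normally returned in a register, but under -mhitachi/-mrenesas (or the renesas attribute) any RECORD_TYPE is forced into memory, so the caller has to supply a return slot:

  struct wrapped { int v; };          /* illustrative type, not from the patch */
  struct wrapped get_value (void);    /* returned via memory under the Renesas convention */
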
+/* We actually emit the code in sh_expand_prologue. We used to use
+ a static variable to flag that we need to emit this code, but that
+ doesn't work when inlining, when functions are deferred and then emitted
+ later. Fortunately, we already have two flags that are part of struct
+ function that tell if a function uses varargs or stdarg. */
+static void
+sh_setup_incoming_varargs (ca, mode, type, pretend_arg_size, second_time)
+ CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ tree type ATTRIBUTE_UNUSED;
+ int *pretend_arg_size ATTRIBUTE_UNUSED;
+ int second_time ATTRIBUTE_UNUSED;
+{
+ if (! current_function_stdarg)
+ abort ();
+}
+
+static bool
+sh_strict_argument_naming (ca)
+ CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED;
+{
+ return TARGET_SH5;
+}
+
+static bool
+sh_pretend_outgoing_varargs_named (ca)
+ CUMULATIVE_ARGS *ca;
+{
+ return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
+}
+
+
/* Define the offset between two registers, one to be eliminated, and
the other its replacement, at the start of a routine. */
@@ -6188,9 +6718,10 @@ initial_elimination_offset (from, to)
{
if (TARGET_SH5)
{
- int i, n = total_saved_regs_space;
- int align;
+ int n = total_saved_regs_space;
int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
+ save_schedule schedule;
+ save_entry *entry;
n += total_auto_space;
@@ -6200,40 +6731,13 @@ initial_elimination_offset (from, to)
target_flags = copy_flags;
- /* We loop twice: first, check 8-byte aligned registers,
- that are stored in the higher addresses, that are known
- to be aligned. Then, check 32-bit registers that don't
- need 8-byte alignment. */
- for (align = 1; align >= 0; align--)
- for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
- if (TEST_HARD_REG_BIT (live_regs_mask, i))
- {
- enum machine_mode mode = REGISTER_NATURAL_MODE (i);
-
- if (mode == SFmode && (i % 2) == 1
- && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
- && TEST_HARD_REG_BIT (live_regs_mask, (i ^ 1)))
- {
- mode = DFmode;
- i--;
- }
-
- /* If we're doing the aligned pass and this is not aligned,
- or we're doing the unaligned pass and this is aligned,
- skip it. */
- if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT)
- == 0) != align)
- continue;
-
- n -= GET_MODE_SIZE (mode);
-
- if (i == pr_reg)
- {
- target_flags = save_flags;
- return n;
- }
- }
-
+ sh5_schedule_saves (&live_regs_mask, &schedule, n);
+ for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
+ if (entry->reg == pr_reg)
+ {
+ target_flags = save_flags;
+ return entry->offset;
+ }
abort ();
}
else
@@ -6296,7 +6800,12 @@ sh_insert_attributes (node, attributes)
to run on.
trap_exit -- use a trapa to exit an interrupt function instead of
- an rte instruction. */
+ an rte instruction.
+
+ renesas -- use Renesas calling/layout conventions (functions and
+ structures).
+
+*/
const struct attribute_spec sh_attribute_table[] =
{
@@ -6304,6 +6813,7 @@ const struct attribute_spec sh_attribute_table[] =
{ "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
{ "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
{ "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
+ { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
{ NULL, 0, 0, false, false, false, NULL }
};
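
A hypothetical use of the new attribute: attaching it to a function type makes that function follow the Renesas calling conventions even when the rest of the translation unit is compiled without -mrenesas, and the same attribute on a structure type selects the MS-compatible bitfield layout, as the sh_ms_bitfield_layout_p change further below shows:

  extern int renesas_lib_call (int, float) __attribute__ ((renesas));
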
@@ -6409,6 +6919,40 @@ sh_handle_trap_exit_attribute (node, name, args, flags, no_add_attrs)
return NULL_TREE;
}
+static tree
+sh_handle_renesas_attribute (node, name, args, flags, no_add_attrs)
+ tree *node ATTRIBUTE_UNUSED;
+ tree name ATTRIBUTE_UNUSED;
+ tree args ATTRIBUTE_UNUSED;
+ int flags ATTRIBUTE_UNUSED;
+ bool *no_add_attrs ATTRIBUTE_UNUSED;
+{
+ return NULL_TREE;
+}
+
+/* True if __attribute__((renesas)) or -mrenesas. */
+int
+sh_attr_renesas_p (td)
+ tree td;
+{
+ if (TARGET_HITACHI)
+ return 1;
+ if (td == 0)
+ return 0;
+ if (DECL_P (td))
+ td = TREE_TYPE (td);
+ return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
+ != NULL_TREE);
+}
+
+/* True if __attribute__((renesas)) or -mrenesas, for the current
+ function. */
+int
+sh_cfun_attr_renesas_p ()
+{
+ return sh_attr_renesas_p (current_function_decl);
+}
+
int
sh_cfun_interrupt_handler_p ()
{
@@ -7872,7 +8416,7 @@ static bool
sh_ms_bitfield_layout_p (record_type)
tree record_type ATTRIBUTE_UNUSED;
{
- return TARGET_SH5;
+ return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
}
/*
@@ -8226,7 +8770,7 @@ sh_media_init_builtins ()
const struct builtin_description *d;
memset (shared, 0, sizeof shared);
- for (d = bdesc; d - bdesc < (int) (sizeof bdesc / sizeof bdesc[0]); d++)
+ for (d = bdesc; d - bdesc < (int) ARRAY_SIZE (bdesc); d++)
{
tree type, arg_type;
int signature = d->signature;
@@ -8555,10 +9099,10 @@ sh_output_mi_thunk (file, thunk_fndecl, delta, vcall_offset, function)
comes first, in which case "this" comes second. */
INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0);
#ifndef PCC_STATIC_STRUCT_RETURN
- if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function))))
+ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
structure_value_byref = 1;
#endif /* not PCC_STATIC_STRUCT_RETURN */
- if (structure_value_byref && struct_value_rtx == 0)
+ if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
{
tree ptype = build_pointer_type (TREE_TYPE (funtype));
@@ -8705,4 +9249,47 @@ function_symbol (const char *name)
return sym;
}
+/* Find the number of a general purpose register in S. */
+static int
+scavenge_reg (HARD_REG_SET *s)
+{
+ int r;
+ for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
+ if (TEST_HARD_REG_BIT (*s, r))
+ return r;
+ return -1;
+}
+
+rtx
+sh_get_pr_initial_val (void)
+{
+ rtx val;
+
+ /* ??? Unfortunately, get_hard_reg_initial_val doesn't always work for the
+ PR register on SHcompact, because it might be clobbered by the prologue.
+ We check first if that is known to be the case. */
+ if (TARGET_SHCOMPACT
+ && ((current_function_args_info.call_cookie
+ & ~ CALL_COOKIE_RET_TRAMP (1))
+ || current_function_has_nonlocal_label))
+ return gen_rtx_MEM (SImode, return_address_pointer_rtx);
+
+ /* If we haven't finished rtl generation, there might be a nonlocal label
+ that we haven't seen yet.
+ ??? get_hard_reg_initial_val fails if it is called while no_new_pseudos
+ is set, unless it has been called before for the same register. And even
+ then, we end up in trouble if we didn't use the register in the same
+ basic block before. So call get_hard_reg_initial_val now and wrap it
+ in an unspec if we might need to replace it. */
+ /* ??? We also must do this for TARGET_SH1 in general, because otherwise
+ combine can put the pseudo returned by get_hard_reg_initial_val into
+ instructions that need general purpose registers, which will fail to
+ be recognized when the pseudo becomes allocated to PR. */
+ val
+ = get_hard_reg_initial_val (Pmode, TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
+ if (TARGET_SH1)
+ return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
+ return val;
+}
+
#include "gt-sh.h"