Diffstat (limited to 'gcc/config/s390/s390.c')
-rw-r--r--  gcc/config/s390/s390.c | 300
1 file changed, 150 insertions, 150 deletions
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index b1cfad1adfe..d5c3c22a93c 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -56,7 +56,7 @@ along with GCC; see the file COPYING3. If not see
/* Define the specific costs for a given cpu. */
-struct processor_costs
+struct processor_costs
{
/* multiplication */
const int m; /* cost of an M instruction. */
@@ -94,7 +94,7 @@ struct processor_costs
const struct processor_costs *s390_cost;
static const
-struct processor_costs z900_cost =
+struct processor_costs z900_cost =
{
COSTS_N_INSNS (5), /* M */
COSTS_N_INSNS (10), /* MGHI */
@@ -126,7 +126,7 @@ struct processor_costs z900_cost =
};
static const
-struct processor_costs z990_cost =
+struct processor_costs z990_cost =
{
COSTS_N_INSNS (4), /* M */
COSTS_N_INSNS (2), /* MGHI */
@@ -158,7 +158,7 @@ struct processor_costs z990_cost =
};
static const
-struct processor_costs z9_109_cost =
+struct processor_costs z9_109_cost =
{
COSTS_N_INSNS (4), /* M */
COSTS_N_INSNS (2), /* MGHI */
@@ -252,7 +252,7 @@ HOST_WIDE_INT s390_warn_framesize = 0;
HOST_WIDE_INT s390_stack_size = 0;
HOST_WIDE_INT s390_stack_guard = 0;
-/* The following structure is embedded in the machine
+/* The following structure is embedded in the machine
specific part of struct function. */
struct GTY (()) s390_frame_layout
@@ -275,8 +275,8 @@ struct GTY (()) s390_frame_layout
int last_save_gpr;
int last_restore_gpr;
- /* Bits standing for floating point registers. Set, if the
- respective register has to be saved. Starting with reg 16 (f0)
+ /* Bits standing for floating point registers. Set, if the
+ respective register has to be saved. Starting with reg 16 (f0)
at the rightmost bit.
Bit 15 - 8 7 6 5 4 3 2 1 0
fpr 15 - 8 7 5 3 1 6 4 2 0
@@ -400,7 +400,7 @@ s390_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
case CCZ1mode:
if (m2 == CCZmode)
return m1;
-
+
return VOIDmode;
default:
@@ -510,7 +510,7 @@ s390_tm_ccmode (rtx op1, rtx op2, bool mixed)
if (INTVAL (op2) == 0)
return CCTmode;
- /* Selected bits all one: CC3.
+ /* Selected bits all one: CC3.
e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
if (INTVAL (op2) == INTVAL (op1))
return CCT3mode;
@@ -582,7 +582,7 @@ s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
case GT:
/* The only overflow condition of NEG and ABS happens when
-INT_MAX is used as parameter, which stays negative. So
- we have an overflow from a positive value to a negative.
+ we have an overflow from a positive value to a negative.
Using CCAP mode the resulting cc can be used for comparisons. */
if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
@@ -591,7 +591,7 @@ s390_select_ccmode (enum rtx_code code, rtx op0, rtx op1)
/* If constants are involved in an add instruction it is possible to use
the resulting cc for comparisons with zero. Knowing the sign of the
constant the overflow behavior gets predictable. e.g.:
- int a, b; if ((b = a + c) > 0)
+ int a, b; if ((b = a + c) > 0)
with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
&& CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
@@ -714,7 +714,7 @@ s390_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1)
&& GET_CODE (*op1) == CONST_INT
&& INTVAL (*op1) == 0xffff
&& SCALAR_INT_MODE_P (GET_MODE (*op0))
- && (nonzero_bits (*op0, GET_MODE (*op0))
+ && (nonzero_bits (*op0, GET_MODE (*op0))
& ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
{
*op0 = gen_lowpart (HImode, *op0);
@@ -822,7 +822,7 @@ s390_emit_compare (enum rtx_code code, rtx op0, rtx op1)
emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
}
- return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
+ return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
}
/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
@@ -1296,9 +1296,9 @@ s390_overlap_p (rtx mem1, rtx mem2, HOST_WIDE_INT size)
/* This overlapping check is used by peepholes merging memory block operations.
Overlapping operations would otherwise be recognized by the S/390 hardware
- and would fall back to a slower implementation. Allowing overlapping
+ and would fall back to a slower implementation. Allowing overlapping
operations would lead to slow code but not to wrong code. Therefore we are
- somewhat optimistic if we cannot prove that the memory blocks are
+ somewhat optimistic if we cannot prove that the memory blocks are
overlapping.
That's why we return false here although this may accept operations on
overlapping memory areas. */
@@ -1621,7 +1621,7 @@ override_options (void)
error ("stack size must not be greater than 64k");
}
else if (s390_stack_guard)
- error ("-mstack-guard implies use of -mstack-size");
+ error ("-mstack-guard implies use of -mstack-size");
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
@@ -1804,7 +1804,7 @@ s390_decompose_address (rtx addr, struct s390_address *out)
{
case UNSPEC_LTREF:
if (!disp)
- disp = gen_rtx_UNSPEC (Pmode,
+ disp = gen_rtx_UNSPEC (Pmode,
gen_rtvec (1, XVECEXP (base, 0, 0)),
UNSPEC_LTREL_OFFSET);
else
@@ -1824,8 +1824,8 @@ s390_decompose_address (rtx addr, struct s390_address *out)
return false;
}
- if (!REG_P (base)
- || (GET_MODE (base) != SImode
+ if (!REG_P (base)
+ || (GET_MODE (base) != SImode
&& GET_MODE (base) != Pmode))
return false;
@@ -1852,7 +1852,7 @@ s390_decompose_address (rtx addr, struct s390_address *out)
{
case UNSPEC_LTREF:
if (!disp)
- disp = gen_rtx_UNSPEC (Pmode,
+ disp = gen_rtx_UNSPEC (Pmode,
gen_rtvec (1, XVECEXP (indx, 0, 0)),
UNSPEC_LTREL_OFFSET);
else
@@ -1872,7 +1872,7 @@ s390_decompose_address (rtx addr, struct s390_address *out)
return false;
}
- if (!REG_P (indx)
+ if (!REG_P (indx)
|| (GET_MODE (indx) != SImode
&& GET_MODE (indx) != Pmode))
return false;
@@ -1904,21 +1904,21 @@ s390_decompose_address (rtx addr, struct s390_address *out)
/* Validate displacement. */
if (!disp)
{
- /* If virtual registers are involved, the displacement will change later
- anyway as the virtual registers get eliminated. This could make a
- valid displacement invalid, but it is more likely to make an invalid
- displacement valid, because we sometimes access the register save area
+ /* If virtual registers are involved, the displacement will change later
+ anyway as the virtual registers get eliminated. This could make a
+ valid displacement invalid, but it is more likely to make an invalid
+ displacement valid, because we sometimes access the register save area
via negative offsets to one of those registers.
Thus we don't check the displacement for validity here. If after
elimination the displacement turns out to be invalid after all,
this is fixed up by reload in any case. */
- if (base != arg_pointer_rtx
- && indx != arg_pointer_rtx
- && base != return_address_pointer_rtx
+ if (base != arg_pointer_rtx
+ && indx != arg_pointer_rtx
+ && base != return_address_pointer_rtx
&& indx != return_address_pointer_rtx
- && base != frame_pointer_rtx
+ && base != frame_pointer_rtx
&& indx != frame_pointer_rtx
- && base != virtual_stack_vars_rtx
+ && base != virtual_stack_vars_rtx
&& indx != virtual_stack_vars_rtx)
if (!DISP_IN_RANGE (offset))
return false;
@@ -2271,8 +2271,8 @@ s390_float_const_zero_p (rtx value)
/* Compute a (partial) cost for rtx X. Return true if the complete
cost has been computed, and false if subexpressions should be
- scanned. In either case, *TOTAL contains the cost result.
- CODE contains GET_CODE (x), OUTER_CODE contains the code
+ scanned. In either case, *TOTAL contains the cost result.
+ CODE contains GET_CODE (x), OUTER_CODE contains the code
of the superexpression of x. */
static bool
@@ -2323,7 +2323,7 @@ s390_rtx_costs (rtx x, int code, int outer_code, int *total,
*total = COSTS_N_INSNS (1);
return false;
- case MULT:
+ case MULT:
switch (GET_MODE (x))
{
case SImode:
@@ -3104,11 +3104,11 @@ s390_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
}
else
{
- if (ad.base
+ if (ad.base
&& !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
|| REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
return false;
-
+
if (ad.indx
&& !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
|| REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
@@ -3354,7 +3354,7 @@ legitimize_pic_address (rtx orig, rtx reg)
gcc_unreachable ();
}
}
- else
+ else
gcc_assert (GET_CODE (addr) == PLUS);
}
if (GET_CODE (addr) == PLUS)
@@ -3743,7 +3743,7 @@ s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
return x;
}
else if (GET_CODE (x) == PLUS
- && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
+ && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
|| TLS_SYMBOLIC_CONST (XEXP (x, 1))))
{
return x;
@@ -3822,7 +3822,7 @@ s390_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
MODE is the mode of the enclosing MEM. OPNUM is the operand number
and TYPE is the reload type of the current reload. */
-rtx
+rtx
legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
int opnum, int type)
{
@@ -3854,7 +3854,7 @@ legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
- BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
opnum, (enum reload_type) type);
return new_rtx;
}
@@ -3952,7 +3952,7 @@ s390_expand_setmem (rtx dst, rtx len, rtx val)
return;
gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
-
+
if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
{
if (val == const0_rtx && INTVAL (len) <= 256)
@@ -3961,7 +3961,7 @@ s390_expand_setmem (rtx dst, rtx len, rtx val)
{
/* Initialize memory by storing the first byte. */
emit_move_insn (adjust_address (dst, QImode, 0), val);
-
+
if (INTVAL (len) > 1)
{
/* Initiate 1 byte overlap move.
@@ -3972,7 +3972,7 @@ s390_expand_setmem (rtx dst, rtx len, rtx val)
rtx dstp1 = adjust_address (dst, VOIDmode, 1);
set_mem_size (dst, const1_rtx);
- emit_insn (gen_movmem_short (dstp1, dst,
+ emit_insn (gen_movmem_short (dstp1, dst,
GEN_INT (INTVAL (len) - 2)));
}
}
@@ -4018,7 +4018,7 @@ s390_expand_setmem (rtx dst, rtx len, rtx val)
/* Initialize memory by storing the first byte. */
emit_move_insn (adjust_address (dst, QImode, 0), val);
-
+
/* If count is 1 we are done. */
emit_cmp_and_jump_insns (count, const1_rtx,
EQ, NULL_RTX, mode, 1, end_label);
@@ -4268,9 +4268,9 @@ s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
}
p = rtvec_alloc (2);
- RTVEC_ELT (p, 0) =
+ RTVEC_ELT (p, 0) =
gen_rtx_SET (VOIDmode, dst, op_res);
- RTVEC_ELT (p, 1) =
+ RTVEC_ELT (p, 1) =
gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
@@ -4329,15 +4329,15 @@ s390_expand_addcc (enum rtx_code cmp_code, rtx cmp_op0, rtx cmp_op1,
if (!register_operand (src, GET_MODE (dst)))
src = force_reg (GET_MODE (dst), src);
- op_res = gen_rtx_MINUS (GET_MODE (dst),
- gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
- gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
- gen_rtx_REG (cc_mode, CC_REGNUM),
+ op_res = gen_rtx_MINUS (GET_MODE (dst),
+ gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
+ gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
+ gen_rtx_REG (cc_mode, CC_REGNUM),
const0_rtx));
p = rtvec_alloc (2);
- RTVEC_ELT (p, 0) =
+ RTVEC_ELT (p, 0) =
gen_rtx_SET (VOIDmode, dst, op_res);
- RTVEC_ELT (p, 1) =
+ RTVEC_ELT (p, 1) =
gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
@@ -4397,7 +4397,7 @@ s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
set_mem_size (dest, GEN_INT (size));
s390_expand_movmem (dest, src_mem, GEN_INT (size));
}
-
+
/* (set (ze (mem)) (reg)). */
else if (register_operand (src, word_mode))
{
@@ -4410,7 +4410,7 @@ s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
int size = stcmh_width / BITS_PER_UNIT;
- emit_move_insn (adjust_address (dest, SImode, size),
+ emit_move_insn (adjust_address (dest, SImode, size),
gen_lowpart (SImode, src));
set_mem_size (dest, GEN_INT (size));
emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
@@ -4427,7 +4427,7 @@ s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
/* (set (ze (reg)) (const_int)). */
if (TARGET_ZARCH
- && register_operand (dest, word_mode)
+ && register_operand (dest, word_mode)
&& (bitpos % 16) == 0
&& (bitsize % 16) == 0
&& const_int_operand (src, VOIDmode))
@@ -4447,9 +4447,9 @@ s390_expand_insv (rtx dest, rtx op1, rtx op2, rtx src)
putsize = GET_MODE_BITSIZE (putmode);
regpos -= putsize;
- emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
+ emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
GEN_INT (putsize),
- GEN_INT (regpos)),
+ GEN_INT (regpos)),
gen_int_mode (val, putmode));
val >>= putsize;
}
@@ -4468,16 +4468,16 @@ s390_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
{
val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
NULL_RTX, 1, OPTAB_DIRECT);
- return expand_simple_binop (SImode, ASHIFT, val, count,
+ return expand_simple_binop (SImode, ASHIFT, val, count,
NULL_RTX, 1, OPTAB_DIRECT);
}
/* Structure to hold the initial parameters for a compare_and_swap operation
- in HImode and QImode. */
+ in HImode and QImode. */
struct alignment_context
{
- rtx memsi; /* SI aligned memory location. */
+ rtx memsi; /* SI aligned memory location. */
rtx shift; /* Bit offset with regard to lsb. */
rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
rtx modemaski; /* ~modemask */
@@ -4529,7 +4529,7 @@ init_alignment_context (struct alignment_context *ac, rtx mem,
ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
NULL_RTX, 1, OPTAB_DIRECT);
/* Calculate masks. */
- ac->modemask = expand_simple_binop (SImode, ASHIFT,
+ ac->modemask = expand_simple_binop (SImode, ASHIFT,
GEN_INT (GET_MODE_MASK (mode)), ac->shift,
NULL_RTX, 1, OPTAB_DIRECT);
ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
@@ -4567,9 +4567,9 @@ s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx ne
/* Start CS loop. */
emit_label (csloop);
- /* val = "<mem>00..0<mem>"
+ /* val = "<mem>00..0<mem>"
* cmp = "00..0<cmp>00..0"
- * new = "00..0<new>00..0"
+ * new = "00..0<new>00..0"
*/
/* Patch cmp and new with val at correct position. */
@@ -4595,17 +4595,17 @@ s390_expand_cs_hqi (enum machine_mode mode, rtx target, rtx mem, rtx cmp, rtx ne
cmpv, newv));
/* Check for changes outside mode. */
- resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
+ resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
NULL_RTX, 1, OPTAB_DIRECT);
- cc = s390_emit_compare (NE, resv, val);
+ cc = s390_emit_compare (NE, resv, val);
emit_move_insn (val, resv);
/* Loop internal if so. */
s390_emit_jump (csloop, cc);
emit_label (csend);
-
+
/* Return the correct part of the bitfield. */
- convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
+ convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
NULL_RTX, 1, OPTAB_DIRECT), 1);
}
@@ -4659,7 +4659,7 @@ s390_expand_atomic (enum machine_mode mode, enum rtx_code code,
val = expand_simple_binop (SImode, AND, val, ac.modemask,
NULL_RTX, 1, OPTAB_DIRECT);
/* FALLTHRU */
- case SET:
+ case SET:
if (ac.aligned && MEM_P (val))
store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
else
@@ -5511,8 +5511,8 @@ s390_split_branches (void)
}
-/* Find an annotated literal pool symbol referenced in RTX X,
- and store it at REF. Will abort if X contains references to
+/* Find an annotated literal pool symbol referenced in RTX X,
+ and store it at REF. Will abort if X contains references to
more than one such pool symbol; multiple references to the same
symbol are allowed, however.
@@ -5545,7 +5545,7 @@ find_constant_pool_ref (rtx x, rtx *ref)
if (*ref == NULL_RTX)
*ref = sym;
- else
+ else
gcc_assert (*ref == sym);
return;
@@ -5566,7 +5566,7 @@ find_constant_pool_ref (rtx x, rtx *ref)
}
}
-/* Replace every reference to the annotated literal pool
+/* Replace every reference to the annotated literal pool
symbol REF in X by its base plus OFFSET. */
static void
@@ -6511,7 +6511,7 @@ s390_chunkify_start (void)
for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
{
- rtx new_insn = gen_reload_base (cfun->machine->base_reg,
+ rtx new_insn = gen_reload_base (cfun->machine->base_reg,
curr_pool->label);
rtx insn = curr_pool->first_insn;
INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
@@ -6526,7 +6526,7 @@ s390_chunkify_start (void)
struct constant_pool *pool = s390_find_pool (pool_list, insn);
if (pool)
{
- rtx new_insn = gen_reload_base (cfun->machine->base_reg,
+ rtx new_insn = gen_reload_base (cfun->machine->base_reg,
pool->label);
INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
}
@@ -6763,7 +6763,7 @@ find_unused_clobbered_reg (void)
}
-/* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
+/* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
clobbered hard regs in SETREG. */
static void
@@ -6821,8 +6821,8 @@ s390_regs_ever_clobbered (int *regs_ever_clobbered)
deal with this automatically. */
if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
- if (crtl->calls_eh_return
- || (cfun->machine->has_landing_pad_p
+ if (crtl->calls_eh_return
+ || (cfun->machine->has_landing_pad_p
&& df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
@@ -6841,16 +6841,16 @@ s390_regs_ever_clobbered (int *regs_ever_clobbered)
{
if (INSN_P (cur_insn))
note_stores (PATTERN (cur_insn),
- s390_reg_clobbered_rtx,
+ s390_reg_clobbered_rtx,
regs_ever_clobbered);
}
}
}
-/* Determine the frame area which actually has to be accessed
- in the function epilogue. The values are stored at the
+/* Determine the frame area which actually has to be accessed
+ in the function epilogue. The values are stored at the
given pointers AREA_BOTTOM (address of the lowest used stack
- address) and AREA_TOP (address of the first item which does
+ address) and AREA_TOP (address of the first item which does
not belong to the stack frame). */
static void
@@ -6884,7 +6884,7 @@ s390_frame_area (int *area_bottom, int *area_top)
b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
}
-
+
*area_bottom = b;
*area_top = t;
}
@@ -6923,10 +6923,10 @@ s390_register_info (int clobbered_regs[])
clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
if (flag_pic)
- clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
+ clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
|= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
- clobbered_regs[BASE_REGNUM]
+ clobbered_regs[BASE_REGNUM]
|= (cfun->machine->base_reg
&& REGNO (cfun->machine->base_reg) == BASE_REGNUM);
@@ -6969,8 +6969,8 @@ s390_register_info (int clobbered_regs[])
cfun_frame_layout.first_save_gpr_slot = i;
cfun_frame_layout.last_save_gpr_slot = j;
- for (i = cfun_frame_layout.first_save_gpr_slot;
- i < cfun_frame_layout.last_save_gpr_slot + 1;
+ for (i = cfun_frame_layout.first_save_gpr_slot;
+ i < cfun_frame_layout.last_save_gpr_slot + 1;
i++)
if (clobbered_regs[i])
break;
@@ -6978,7 +6978,7 @@ s390_register_info (int clobbered_regs[])
for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
if (clobbered_regs[j])
break;
-
+
if (i == cfun_frame_layout.last_save_gpr_slot + 1)
{
/* Nothing to save/restore. */
@@ -7058,7 +7058,7 @@ s390_frame_info (void)
cfun_frame_layout.frame_size = get_frame_size ();
if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
fatal_error ("total size of local variables exceeds architecture limit");
-
+
if (!TARGET_PACKED_STACK)
{
cfun_frame_layout.backchain_offset = 0;
@@ -7072,46 +7072,46 @@ s390_frame_info (void)
{
cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
- UNITS_PER_WORD);
- cfun_frame_layout.gprs_offset
- = (cfun_frame_layout.backchain_offset
+ cfun_frame_layout.gprs_offset
+ = (cfun_frame_layout.backchain_offset
- (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
* UNITS_PER_WORD);
-
+
if (TARGET_64BIT)
{
- cfun_frame_layout.f4_offset
+ cfun_frame_layout.f4_offset
= (cfun_frame_layout.gprs_offset
- 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
-
- cfun_frame_layout.f0_offset
- = (cfun_frame_layout.f4_offset
+
+ cfun_frame_layout.f0_offset
+ = (cfun_frame_layout.f4_offset
- 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
}
else
{
/* On 31 bit we have to care about alignment of the
floating point regs to provide fastest access. */
- cfun_frame_layout.f0_offset
- = ((cfun_frame_layout.gprs_offset
+ cfun_frame_layout.f0_offset
+ = ((cfun_frame_layout.gprs_offset
& ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
- 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
-
- cfun_frame_layout.f4_offset
+
+ cfun_frame_layout.f4_offset
= (cfun_frame_layout.f0_offset
- 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
}
}
else /* no backchain */
{
- cfun_frame_layout.f4_offset
+ cfun_frame_layout.f4_offset
= (STACK_POINTER_OFFSET
- 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
-
- cfun_frame_layout.f0_offset
+
+ cfun_frame_layout.f0_offset
= (cfun_frame_layout.f4_offset
- 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
-
- cfun_frame_layout.gprs_offset
+
+ cfun_frame_layout.gprs_offset
= cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
}
@@ -7132,7 +7132,7 @@ s390_frame_info (void)
if (TARGET_BACKCHAIN)
cfun_frame_layout.frame_size += UNITS_PER_WORD;
- /* No alignment trouble here because f8-f15 are only saved under
+ /* No alignment trouble here because f8-f15 are only saved under
64 bit. */
cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
cfun_frame_layout.f4_offset),
@@ -7144,9 +7144,9 @@ s390_frame_info (void)
for (i = 0; i < 8; i++)
if (cfun_fpr_bit_p (i))
cfun_frame_layout.frame_size += 8;
-
+
cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
-
+
/* If under 31 bit an odd number of gprs has to be saved we have to adjust
the frame size to sustain 8 byte alignment of stack frames. */
cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
@@ -7213,11 +7213,11 @@ s390_update_frame_layout (void)
s390_register_info (clobbered_regs);
- df_set_regs_ever_live (BASE_REGNUM,
+ df_set_regs_ever_live (BASE_REGNUM,
clobbered_regs[BASE_REGNUM] ? true : false);
- df_set_regs_ever_live (RETURN_REGNUM,
+ df_set_regs_ever_live (RETURN_REGNUM,
clobbered_regs[RETURN_REGNUM] ? true : false);
- df_set_regs_ever_live (STACK_POINTER_REGNUM,
+ df_set_regs_ever_live (STACK_POINTER_REGNUM,
clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
if (cfun->machine->base_reg)
@@ -7249,10 +7249,10 @@ s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
case GENERAL_REGS:
if (REGNO_PAIR_OK (regno, mode))
{
- if (TARGET_64BIT
+ if (TARGET_64BIT
|| (mode != TFmode && mode != TCmode && mode != TDmode))
return true;
- }
+ }
break;
case CC_REGS:
if (GET_MODE_CLASS (mode) == MODE_CC)
@@ -7268,7 +7268,7 @@ s390_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
default:
return false;
}
-
+
return false;
}
@@ -7365,7 +7365,7 @@ s390_initial_elimination_offset (int from, int to)
switch (from)
{
case FRAME_POINTER_REGNUM:
- offset = (get_frame_size()
+ offset = (get_frame_size()
+ STACK_POINTER_OFFSET
+ crtl->outgoing_args_size);
break;
@@ -7460,7 +7460,7 @@ save_gprs (rtx base, int offset, int first, int last)
for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
{
rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
-
+
if (first + i <= 6)
set_mem_alias_set (mem, get_varargs_alias_set ());
}
@@ -7624,8 +7624,8 @@ s390_emit_prologue (void)
/* Choose best register to use for temp use within prologue.
See below for why TPF must use the register 1. */
- if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
- && !current_function_is_leaf
+ if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
+ && !current_function_is_leaf
&& !TARGET_TPF_PROFILING)
temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
else
@@ -7634,11 +7634,11 @@ s390_emit_prologue (void)
/* Save call saved gprs. */
if (cfun_frame_layout.first_save_gpr != -1)
{
- insn = save_gprs (stack_pointer_rtx,
- cfun_frame_layout.gprs_offset +
- UNITS_PER_WORD * (cfun_frame_layout.first_save_gpr
+ insn = save_gprs (stack_pointer_rtx,
+ cfun_frame_layout.gprs_offset +
+ UNITS_PER_WORD * (cfun_frame_layout.first_save_gpr
- cfun_frame_layout.first_save_gpr_slot),
- cfun_frame_layout.first_save_gpr,
+ cfun_frame_layout.first_save_gpr,
cfun_frame_layout.last_save_gpr);
emit_insn (insn);
}
@@ -7691,14 +7691,14 @@ s390_emit_prologue (void)
if (cfun_fpr_bit_p (i))
{
insn = save_fpr (stack_pointer_rtx, offset, i + 16);
-
+
RTX_FRAME_RELATED_P (insn) = 1;
offset -= 8;
}
if (offset >= cfun_frame_layout.f8_offset)
next_fpr = i + 16;
}
-
+
if (!TARGET_PACKED_STACK)
next_fpr = cfun_save_high_fprs_p ? 31 : 0;
@@ -7750,9 +7750,9 @@ s390_emit_prologue (void)
}
}
- if (s390_warn_framesize > 0
+ if (s390_warn_framesize > 0
&& cfun_frame_layout.frame_size >= s390_warn_framesize)
- warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
+ warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
current_function_name (), cfun_frame_layout.frame_size);
if (s390_warn_dynamicstack_p && cfun->calls_alloca)
@@ -7767,7 +7767,7 @@ s390_emit_prologue (void)
if (DISP_IN_RANGE (INTVAL (frame_off)))
{
insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
frame_off));
insn = emit_insn (insn);
}
@@ -7792,11 +7792,11 @@ s390_emit_prologue (void)
if (TARGET_BACKCHAIN)
{
if (cfun_frame_layout.backchain_offset)
- addr = gen_rtx_MEM (Pmode,
- plus_constant (stack_pointer_rtx,
+ addr = gen_rtx_MEM (Pmode,
+ plus_constant (stack_pointer_rtx,
cfun_frame_layout.backchain_offset));
else
- addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
+ addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
set_mem_alias_set (addr, get_frame_alias_set ());
insn = emit_insn (gen_move_insn (addr, temp_reg));
}
@@ -7821,7 +7821,7 @@ s390_emit_prologue (void)
moved below the use of the stack slots. */
s390_emit_stack_tie ();
- insn = emit_insn (gen_add2_insn (temp_reg,
+ insn = emit_insn (gen_add2_insn (temp_reg,
GEN_INT (cfun_frame_layout.f8_offset)));
offset = 0;
@@ -7833,7 +7833,7 @@ s390_emit_prologue (void)
cfun_frame_layout.frame_size
+ cfun_frame_layout.f8_offset
+ offset);
-
+
insn = save_fpr (temp_reg, offset, i);
offset += 8;
RTX_FRAME_RELATED_P (insn) = 1;
@@ -7904,7 +7904,7 @@ s390_emit_epilogue (bool sibcall)
/* Check whether to use frame or stack pointer for restore. */
- frame_pointer = (frame_pointer_needed
+ frame_pointer = (frame_pointer_needed
? hard_frame_pointer_rtx : stack_pointer_rtx);
s390_frame_area (&area_bottom, &area_top);
@@ -7962,7 +7962,7 @@ s390_emit_epilogue (bool sibcall)
}
}
}
-
+
}
else
{
@@ -7978,7 +7978,7 @@ s390_emit_epilogue (bool sibcall)
else if (!TARGET_PACKED_STACK)
next_offset += 8;
}
-
+
}
/* Return register. */
@@ -8010,7 +8010,7 @@ s390_emit_epilogue (bool sibcall)
if (global_regs[i])
{
addr = plus_constant (frame_pointer,
- offset + cfun_frame_layout.gprs_offset
+ offset + cfun_frame_layout.gprs_offset
+ (i - cfun_frame_layout.first_save_gpr_slot)
* UNITS_PER_WORD);
addr = gen_rtx_MEM (Pmode, addr);
@@ -8035,7 +8035,7 @@ s390_emit_epilogue (bool sibcall)
addr = plus_constant (frame_pointer,
offset + cfun_frame_layout.gprs_offset
- + (RETURN_REGNUM
+ + (RETURN_REGNUM
- cfun_frame_layout.first_save_gpr_slot)
* UNITS_PER_WORD);
addr = gen_rtx_MEM (Pmode, addr);
@@ -8046,7 +8046,7 @@ s390_emit_epilogue (bool sibcall)
insn = restore_gprs (frame_pointer,
offset + cfun_frame_layout.gprs_offset
- + (cfun_frame_layout.first_restore_gpr
+ + (cfun_frame_layout.first_restore_gpr
- cfun_frame_layout.first_save_gpr_slot)
* UNITS_PER_WORD,
cfun_frame_layout.first_restore_gpr,
@@ -8456,7 +8456,7 @@ s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
size_int (-RETURN_REGNUM * UNITS_PER_WORD));
-
+
t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
@@ -8488,7 +8488,7 @@ s390_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
} */
static tree
-s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
+s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
gimple_seq *post_p ATTRIBUTE_UNUSED)
{
tree f_gpr, f_fpr, f_ovf, f_sav;
@@ -8588,9 +8588,9 @@ s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
gimplify_and_add (t, pre_p);
- t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
+ t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
size_int (sav_ofs));
- u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
+ u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
@@ -8605,14 +8605,14 @@ s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
t = ovf;
if (size < UNITS_PER_WORD)
- t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
+ t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
size_int (UNITS_PER_WORD - size));
gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
gimplify_assign (addr, t, pre_p);
- t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
+ t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
size_int (size));
gimplify_assign (ovf, t, pre_p);
@@ -9268,7 +9268,7 @@ s390_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
*p1 = CC_REGNUM;
*p2 = INVALID_REGNUM;
-
+
return true;
}
@@ -9434,10 +9434,10 @@ s390_optimize_prologue (void)
/* If all special registers are in fact used, there's nothing we
can do, so no point in walking the insn list. */
- if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
+ if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
&& cfun_frame_layout.last_save_gpr >= BASE_REGNUM
- && (TARGET_CPU_ZARCH
- || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
+ && (TARGET_CPU_ZARCH
+ || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
&& cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
return;
@@ -9477,9 +9477,9 @@ s390_optimize_prologue (void)
if (cfun_frame_layout.first_save_gpr != -1)
{
- new_insn = save_gprs (base,
+ new_insn = save_gprs (base,
off + (cfun_frame_layout.first_save_gpr
- - first) * UNITS_PER_WORD,
+ - first) * UNITS_PER_WORD,
cfun_frame_layout.first_save_gpr,
cfun_frame_layout.last_save_gpr);
new_insn = emit_insn_before (new_insn, insn);
@@ -9538,9 +9538,9 @@ s390_optimize_prologue (void)
if (cfun_frame_layout.first_restore_gpr != -1)
{
- new_insn = restore_gprs (base,
+ new_insn = restore_gprs (base,
off + (cfun_frame_layout.first_restore_gpr
- - first) * UNITS_PER_WORD,
+ - first) * UNITS_PER_WORD,
cfun_frame_layout.first_restore_gpr,
cfun_frame_layout.last_restore_gpr);
new_insn = emit_insn_before (new_insn, insn);