aboutsummaryrefslogtreecommitdiff
path: root/gcc/config/m68k/m68k.c
diff options
context:
space:
mode:
authorMaxim Kuvyrkov <maxim@codesourcery.com>2008-05-07 08:09:27 +0000
committerMaxim Kuvyrkov <maxim@codesourcery.com>2008-05-07 08:09:27 +0000
commite0b124e54bad9011c18de77a3575dbb95c93cda1 (patch)
treef70588bfff0b3c443ce5745fb7846edae048d3b9 /gcc/config/m68k/m68k.c
parent6ea2037bc04454de0a828b493c7acfe587fac6ed (diff)
Cleanup ColdFire scheduling support and add V4 pipeline model.
* config/m68k/m68k.md (UNSPEC_TIE): New constant. (define_attr cpu): Add cfv4 value. (define_attr type, define_attr type1): Merge into a single 'type' attribute. Update all uses. (define_attr opx_type, define_attr opy_type, define_attr opx_access): Rearrange and update. Rename value 'reg' to 'Rn', add value 'FPn'. Update all uses. (define_attr opx_mem, define_attr opy_mem): Remove. (define_attr op_mem): Clean up, update comment. (define_attr size): Use specific values instead of general int. (define_attr guess, define_attr split): Remove. Update all uses. (movdf_internal, tstsi_internal, tsthi_internal, tstqi_internal, tst<mode>_68881, pushexthisi_const, movsi_const0_68000_10, movsi_const0_68040_60, movsi_const0, movsi_cf, movstrictqi_cf, zero_extendhisi2_cf, zero_extendqisi2_cfv4, cfv4_extendhisi2, 68k_extendhisi2, extendqihi2, cfv4_extendqisi2, 68k_extendqisi2, floatsi<mode>2_68881, ftrunc<mode>2_68881, ftrunc<mode>2_cf, fix<mode>qi2_68881, fix<mode>hi2_68881, fix<mode>si2_68881, adddi_dishl32, addsi3_5200, add<mode>3_floatsi_68881, add<mode>3_floathi_68881, add<mode>3_floatqi_68881, add<mode>3_68881, add<mode>3_cf, subdi_dishl32, subsi3, sub<mode>3_floatsi_68881, sub<mode>3_floathi_68881, sub<mode>3_floatqi_68881, sub<mode>3_68881, sub<mode>3_cf, mulhi3, mulhisi3, mulhisisi3_s, mulsi3_68020, mulsi3_cf, umulhisi3, mulhisisi3_z, mul<mode>3_floatsi_68881, mul<mode>3_floathi_68881, mul<mode>3_floatqi_68881, fmul<mode>3_cf, div<mode>3_cf, sqrt<mode>2_cf, abs<mode>2_cf, clzsi2, one_cmplsi2_5200, subreghi1ashrdi_const32, ashrsi3, lshrsi3, bsetmemqi, bsetmemqi_ext, bclrmemqi, bclrmemqi_ext, beq, bne, bgt, blt, bordered, bunordered, buneq, bunge, bungt, bunle, bunlt, bltgt, tablejump_internal, call, non_symbolic_call_value, symbolic_call_value_jsr, symbolic_call_value_bsr, link): Update or set attributes. (stack_tie): New fake instruction. * config/m68k/m68k.h (TUNE_CFV4): New macro. (m68k_sched_attr_size): Update declaration. (m68k_sched_attr_type2): Remove. 
(m68k_sched_address_bypass_p, m68k_sched_indexed_address_bypass_p): Declare new bypass predicates. * config/m68k/m68k.c (m68k_sched_issue_rate, m68k_sched_first_cycle_multipass_dfa_lookahead): Declare hook implementations. (TARGET_SCHED_ISSUE_RATE, TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD): Override hooks. (override_options): Handle scheduling for ColdFire V4 core. (m68k_expand_prologue): Emit stack_tie. (enum attr_op_type): Split value 'OP_TYPE_REG' to 'OP_TYPE_RN' and 'OP_TYPE_FPN'. Update all uses. (sched_guess_p): Remove. (sched_address_type): Handle symbolic addresses. (sched_get_operand): New static function. (sched_operand_type): Merge into sched_attr_op_type. (sched_attr_op_type): Handle FP registers, handle quick constants, update. (m68k_sched_attr_opx_type, m68k_sched_attr_opy_type): Update. (m68k_sched_attr_size): Update. Move logic to ... (sched_get_attr_size_int): New static function. (sched_get_opxy_mem_type): New static function. (m68k_sched_attr_op_mem): Update. (m68k_sched_attr_type2): Remove. (sched_cfv4_bypass_data): New static variable. (m68k_sched_adjust_cost): Handle ColdFire V4 bypass. (m68k_sched_issue_rate): Implement scheduler hook. (struct _sched_ib: enabled_p): New field. (m68k_sched_variable_issue): Update. Handle V4. (SCHED_DUMP_TODO, SCHED_DUMP_DONE, SCHED_DUMP_NOTHING, sched_dump_class_func_t, sched_dump_split_class, sched_dump_dfa_guess_unit_code, sched_dump_dfa_state, sched_dump_dfa_class, m68k_sched_dump): Remove. (m68k_sched_first_cycle_multipass_dfa_lookahead): Implement scheduler hook. (m68k_sched_init_global): Remove statisctics dumping, introduce sanity check that all instructions have pipeline reservations. Handle ColdFire V4 core. (m68k_sched_dfa_pre_advance_cycle, m68k_sched_dfa_post_advance_cycle): Handle ColdFire V4 core. (sched_mem_operand_p, sched_get_reg_operand, sched_get_mem_operand): New static functions. (m68k_sched_address_bypass_p): New bypass predicate. 
(sched_get_indexed_address_scale): New static function. (m68k_sched_indexed_address_bypass_p): New bypass predicate. * cf.md: Update comments. (define_attr type2): Remove. Use 'type' attribute instead. Update all uses. (cf_ib): Rename to cfv123_ib. Update all uses. (cf_oep): Rename to cfv123_oep. Update all uses. (cf_chr): Rename to cfv123_chr. Update all uses. (cf_mem): Rename to cfv123_mem. Update all uses. (cf_mac): Move to more appropriate place. (cfv123_guess): New automaton and cpu_unit. (cfv123_*, cfv12_*, cfv1_*, cfv2_*, cfv3_*): Use type attribute. Update uses of 'size' attribute. Handle before reload scheduling. (cfv123_guess): New dummy reservation for unhandled instructions. (cfv4_*): Pipeline description of ColdFire V4 core. (ignore): New reservation to handle 'ignore' type. git-svn-id: https://gcc.gnu.org/svn/gcc/trunk@135033 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/config/m68k/m68k.c')
-rw-r--r--gcc/config/m68k/m68k.c869
1 files changed, 509 insertions, 360 deletions
diff --git a/gcc/config/m68k/m68k.c b/gcc/config/m68k/m68k.c
index 3fdd756ada9..52a60da0664 100644
--- a/gcc/config/m68k/m68k.c
+++ b/gcc/config/m68k/m68k.c
@@ -122,12 +122,14 @@ struct m68k_address {
};
static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
+static int m68k_sched_issue_rate (void);
static int m68k_sched_variable_issue (FILE *, int, rtx, int);
static void m68k_sched_md_init_global (FILE *, int, int);
static void m68k_sched_md_finish_global (FILE *, int);
static void m68k_sched_md_init (FILE *, int, int);
static void m68k_sched_dfa_pre_advance_cycle (void);
static void m68k_sched_dfa_post_advance_cycle (void);
+static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
static bool m68k_handle_option (size_t, const char *, int);
static rtx find_addr_reg (rtx);
@@ -199,6 +201,9 @@ int m68k_last_compare_had_fp_operands;
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
+
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
@@ -217,6 +222,10 @@ int m68k_last_compare_had_fp_operands;
#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
+ m68k_sched_first_cycle_multipass_dfa_lookahead
+
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION m68k_handle_option
@@ -672,6 +681,8 @@ override_options (void)
m68k_sched_cpu = CPU_CFV2;
else if (TUNE_CFV3)
m68k_sched_cpu = CPU_CFV3;
+ else if (TUNE_CFV4)
+ m68k_sched_cpu = CPU_CFV4;
else
{
m68k_sched_cpu = CPU_UNKNOWN;
@@ -1037,6 +1048,11 @@ m68k_expand_prologue (void)
stack_pointer_rtx,
GEN_INT (-fsize_with_regs))));
}
+
+ /* If the frame pointer is needed, emit a special barrier that
+ will prevent the scheduler from moving stores to the frame
+ before the stack adjustment. */
+ emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
}
else if (fsize_with_regs != 0)
m68k_set_frame_related
@@ -4579,8 +4595,11 @@ enum attr_op_type
/* No operand. */
OP_TYPE_NONE,
- /* Register. */
- OP_TYPE_REG,
+ /* Integer register. */
+ OP_TYPE_RN,
+
+ /* FP register. */
+ OP_TYPE_FPN,
/* Implicit mem reference (e.g. stack). */
OP_TYPE_MEM1,
@@ -4607,19 +4626,19 @@ enum attr_op_type
OP_TYPE_IMM_L
};
-/* True if current insn doesn't have complete pipeline description. */
-static bool sched_guess_p;
-
/* Return type of memory ADDR_RTX refers to. */
static enum attr_op_type
sched_address_type (enum machine_mode mode, rtx addr_rtx)
{
struct m68k_address address;
+ if (symbolic_operand (addr_rtx, VOIDmode))
+ return OP_TYPE_MEM7;
+
if (!m68k_decompose_address (mode, addr_rtx,
reload_completed, &address))
{
- gcc_assert (sched_guess_p);
+ gcc_assert (!reload_completed);
/* Reload will likely fix the address to be in the register. */
return OP_TYPE_MEM234;
}
@@ -4640,12 +4659,42 @@ sched_address_type (enum machine_mode mode, rtx addr_rtx)
return OP_TYPE_MEM7;
}
-/* Return type of the operand OP.
- If ADDRESS_P is true, return type of memory location OP refers to. */
+/* Return X or Y (depending on OPX_P) operand of INSN. */
+static rtx
+sched_get_operand (rtx insn, bool opx_p)
+{
+ int i;
+
+ if (recog_memoized (insn) < 0)
+ gcc_unreachable ();
+
+ extract_constrain_insn_cached (insn);
+
+ if (opx_p)
+ i = get_attr_opx (insn);
+ else
+ i = get_attr_opy (insn);
+
+ if (i >= recog_data.n_operands)
+ return NULL;
+
+ return recog_data.operand[i];
+}
+
+/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
+ If ADDRESS_P is true, return type of memory location operand refers to. */
static enum attr_op_type
-sched_operand_type (rtx op, bool address_p)
+sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
{
- gcc_assert (op != NULL_RTX);
+ rtx op;
+
+ op = sched_get_operand (insn, opx_p);
+
+ if (op == NULL)
+ {
+ gcc_assert (!reload_completed);
+ return OP_TYPE_RN;
+ }
if (address_p)
return sched_address_type (QImode, op);
@@ -4654,13 +4703,49 @@ sched_operand_type (rtx op, bool address_p)
return sched_address_type (GET_MODE (op), XEXP (op, 0));
if (register_operand (op, VOIDmode))
- return OP_TYPE_REG;
+ {
+ if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
+ || (reload_completed && FP_REG_P (op)))
+ return OP_TYPE_FPN;
+
+ return OP_TYPE_RN;
+ }
if (GET_CODE (op) == CONST_INT)
{
- /* ??? Below condition should probably check if the operation is
- signed or unsigned. */
- if (IN_RANGE (INTVAL (op), -0x8000, 0x7fff))
+ int ival;
+
+ ival = INTVAL (op);
+
+ /* Check for quick constants. */
+ switch (get_attr_type (insn))
+ {
+ case TYPE_ALUQ_L:
+ if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
+ return OP_TYPE_IMM_Q;
+
+ gcc_assert (!reload_completed);
+ break;
+
+ case TYPE_MOVEQ_L:
+ if (USE_MOVQ (ival))
+ return OP_TYPE_IMM_Q;
+
+ gcc_assert (!reload_completed);
+ break;
+
+ case TYPE_MOV3Q_L:
+ if (valid_mov3q_const (ival))
+ return OP_TYPE_IMM_Q;
+
+ gcc_assert (!reload_completed);
+ break;
+
+ default:
+ break;
+ }
+
+ if (IN_RANGE (ival, -0x8000, 0x7fff))
return OP_TYPE_IMM_W;
return OP_TYPE_IMM_L;
@@ -4706,32 +4791,12 @@ sched_operand_type (rtx op, bool address_p)
}
}
- gcc_assert (sched_guess_p);
-
- return OP_TYPE_REG;
-}
-
-/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
- If ADDRESS_P is true, return type of memory location operand refers to. */
-static enum attr_op_type
-sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
-{
- int i;
-
- extract_constrain_insn_cached (insn);
+ gcc_assert (!reload_completed);
- if (opx_p)
- i = get_attr_opx (insn);
- else
- i = get_attr_opy (insn);
+ if (FLOAT_MODE_P (GET_MODE (op)))
+ return OP_TYPE_FPN;
- if (i >= recog_data.n_operands)
- {
- gcc_assert (sched_guess_p);
- return OP_TYPE_REG;
- }
-
- return sched_operand_type (recog_data.operand[i], address_p);
+ return OP_TYPE_RN;
}
/* Implement opx_type attribute.
@@ -4740,12 +4805,13 @@ sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
enum attr_opx_type
m68k_sched_attr_opx_type (rtx insn, int address_p)
{
- sched_guess_p = (get_attr_guess (insn) == GUESS_YES);
-
switch (sched_attr_op_type (insn, true, address_p != 0))
{
- case OP_TYPE_REG:
- return OPX_TYPE_REG;
+ case OP_TYPE_RN:
+ return OPX_TYPE_RN;
+
+ case OP_TYPE_FPN:
+ return OPX_TYPE_FPN;
case OP_TYPE_MEM1:
return OPX_TYPE_MEM1;
@@ -4783,12 +4849,13 @@ m68k_sched_attr_opx_type (rtx insn, int address_p)
enum attr_opy_type
m68k_sched_attr_opy_type (rtx insn, int address_p)
{
- sched_guess_p = (get_attr_guess (insn) == GUESS_YES);
-
switch (sched_attr_op_type (insn, false, address_p != 0))
{
- case OP_TYPE_REG:
- return OPY_TYPE_REG;
+ case OP_TYPE_RN:
+ return OPY_TYPE_RN;
+
+ case OP_TYPE_FPN:
+ return OPY_TYPE_FPN;
case OP_TYPE_MEM1:
return OPY_TYPE_MEM1;
@@ -4820,17 +4887,21 @@ m68k_sched_attr_opy_type (rtx insn, int address_p)
}
}
-/* Return the size of INSN. */
-int
-m68k_sched_attr_size (rtx insn)
+/* Return size of INSN as int. */
+static int
+sched_get_attr_size_int (rtx insn)
{
int size;
- sched_guess_p = (get_attr_guess (insn) == GUESS_YES);
-
- switch (get_attr_type1 (insn))
+ switch (get_attr_type (insn))
{
- case TYPE1_MUL_L:
+ case TYPE_IGNORE:
+ /* There should be no references to m68k_sched_attr_size for 'ignore'
+ instructions. */
+ gcc_unreachable ();
+ return 0;
+
+ case TYPE_MUL_L:
size = 2;
break;
@@ -4842,7 +4913,8 @@ m68k_sched_attr_size (rtx insn)
switch (get_attr_opx_type (insn))
{
case OPX_TYPE_NONE:
- case OPX_TYPE_REG:
+ case OPX_TYPE_RN:
+ case OPX_TYPE_FPN:
case OPX_TYPE_MEM1:
case OPX_TYPE_MEM234:
case OPY_TYPE_IMM_Q:
@@ -4867,7 +4939,8 @@ m68k_sched_attr_size (rtx insn)
switch (get_attr_opy_type (insn))
{
case OPY_TYPE_NONE:
- case OPY_TYPE_REG:
+ case OPY_TYPE_RN:
+ case OPY_TYPE_FPN:
case OPY_TYPE_MEM1:
case OPY_TYPE_MEM234:
case OPY_TYPE_IMM_Q:
@@ -4891,7 +4964,7 @@ m68k_sched_attr_size (rtx insn)
if (size > 3)
{
- gcc_assert (sched_guess_p);
+ gcc_assert (!reload_completed);
size = 3;
}
@@ -4899,22 +4972,100 @@ m68k_sched_attr_size (rtx insn)
return size;
}
+/* Return size of INSN as attribute enum value. */
+enum attr_size
+m68k_sched_attr_size (rtx insn)
+{
+ switch (sched_get_attr_size_int (insn))
+ {
+ case 1:
+ return SIZE_1;
+
+ case 2:
+ return SIZE_2;
+
+ case 3:
+ return SIZE_3;
+
+ default:
+ gcc_unreachable ();
+ return 0;
+ }
+}
+
+/* Return type of operand X or Y (depending on OPX_P) of INSN:
+   OP_TYPE_MEM1 or OP_TYPE_MEM6 if the operand is a MEM, OP_TYPE_RN otherwise.  */
+static enum attr_op_type
+sched_get_opxy_mem_type (rtx insn, bool opx_p)
+{
+ if (opx_p)
+ {
+ switch (get_attr_opx_type (insn))
+ {
+ case OPX_TYPE_NONE:
+ case OPX_TYPE_RN:
+ case OPX_TYPE_FPN:
+ case OPX_TYPE_IMM_Q:
+ case OPX_TYPE_IMM_W:
+ case OPX_TYPE_IMM_L:
+ return OP_TYPE_RN;
+
+ case OPX_TYPE_MEM1:
+ case OPX_TYPE_MEM234:
+ case OPX_TYPE_MEM5:
+ case OPX_TYPE_MEM7:
+ return OP_TYPE_MEM1;
+
+ case OPX_TYPE_MEM6:
+ return OP_TYPE_MEM6;
+
+ default:
+ gcc_unreachable ();
+ return 0;
+ }
+ }
+ else
+ {
+ switch (get_attr_opy_type (insn))
+ {
+ case OPY_TYPE_NONE:
+ case OPY_TYPE_RN:
+ case OPY_TYPE_FPN:
+ case OPY_TYPE_IMM_Q:
+ case OPY_TYPE_IMM_W:
+ case OPY_TYPE_IMM_L:
+ return OP_TYPE_RN;
+
+ case OPY_TYPE_MEM1:
+ case OPY_TYPE_MEM234:
+ case OPY_TYPE_MEM5:
+ case OPY_TYPE_MEM7:
+ return OP_TYPE_MEM1;
+
+ case OPY_TYPE_MEM6:
+ return OP_TYPE_MEM6;
+
+ default:
+ gcc_unreachable ();
+ return 0;
+ }
+ }
+}
+
/* Implement op_mem attribute. */
enum attr_op_mem
m68k_sched_attr_op_mem (rtx insn)
{
- enum attr_opy_mem opy;
- enum attr_opx_mem opx;
+ enum attr_op_type opx;
+ enum attr_op_type opy;
- sched_guess_p = (get_attr_guess (insn) == GUESS_YES);
+ opx = sched_get_opxy_mem_type (insn, true);
+ opy = sched_get_opxy_mem_type (insn, false);
- opy = get_attr_opy_mem (insn);
- opx = get_attr_opx_mem (insn);
-
- if (opy == OPY_MEM_R && opx == OPX_MEM_R)
+ if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
return OP_MEM_00;
- if (opy == OPY_MEM_R && opx == OPX_MEM_M)
+ if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
{
switch (get_attr_opx_access (insn))
{
@@ -4928,12 +5079,12 @@ m68k_sched_attr_op_mem (rtx insn)
return OP_MEM_11;
default:
- gcc_assert (sched_guess_p);
- return OP_MEM_UNKNOWN;
+ gcc_unreachable ();
+ return 0;
}
}
- if (opy == OPY_MEM_R && opx == OPX_MEM_I)
+ if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
{
switch (get_attr_opx_access (insn))
{
@@ -4947,15 +5098,15 @@ m68k_sched_attr_op_mem (rtx insn)
return OP_MEM_I1;
default:
- gcc_assert (sched_guess_p);
- return OP_MEM_UNKNOWN;
+ gcc_unreachable ();
+ return 0;
}
}
- if (opy == OPY_MEM_M && opx == OPX_MEM_R)
+ if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
return OP_MEM_10;
- if (opy == OPY_MEM_M && opx == OPX_MEM_M)
+ if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
{
switch (get_attr_opx_access (insn))
{
@@ -4963,12 +5114,12 @@ m68k_sched_attr_op_mem (rtx insn)
return OP_MEM_11;
default:
- gcc_assert (sched_guess_p);
- return OP_MEM_UNKNOWN;
+ gcc_assert (!reload_completed);
+ return OP_MEM_11;
}
}
- if (opy == OPY_MEM_M && opx == OPX_MEM_I)
+ if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
{
switch (get_attr_opx_access (insn))
{
@@ -4976,16 +5127,15 @@ m68k_sched_attr_op_mem (rtx insn)
return OP_MEM_1I;
default:
- gcc_assert (sched_guess_p);
- return OP_MEM_UNKNOWN;
+ gcc_assert (!reload_completed);
+ return OP_MEM_1I;
}
}
- if (opy == OPY_MEM_I && opx == OPX_MEM_R)
+ if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
return OP_MEM_I0;
-
- if (opy == OPY_MEM_I && opx == OPX_MEM_M)
+ if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
{
switch (get_attr_opx_access (insn))
{
@@ -4993,13 +5143,14 @@ m68k_sched_attr_op_mem (rtx insn)
return OP_MEM_I1;
default:
- gcc_assert (sched_guess_p);
- return OP_MEM_UNKNOWN;
+ gcc_assert (!reload_completed);
+ return OP_MEM_I1;
}
}
- gcc_assert (sched_guess_p);
- return OP_MEM_UNKNOWN;
+ gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
+ gcc_assert (!reload_completed);
+ return OP_MEM_I1;
}
/* Jump instructions types. Indexed by INSN_UID.
@@ -5022,80 +5173,21 @@ m68k_sched_branch_type (rtx insn)
return type;
}
-/* Implement type2 attribute. */
-enum attr_type2
-m68k_sched_attr_type2 (rtx insn)
+/* Data for ColdFire V4 index bypass.
+ Producer modifies register that is used as index in consumer with
+ specified scale. */
+static struct
{
- switch (get_attr_type1 (insn))
- {
- case TYPE1_ALU_L:
- case TYPE1_ALUQ_L:
- case TYPE1_CMP_L:
- return TYPE2_ALU;
+ /* Producer instruction. */
+ rtx pro;
- case TYPE1_ALU_REG1:
- case TYPE1_ALU_REGX:
- return TYPE2_ALU_REG;
+ /* Consumer instruction. */
+ rtx con;
- case TYPE1_BCC:
- return TYPE2_BCC;
-
- case TYPE1_BRA:
- return TYPE2_BRA;
-
- case TYPE1_BSR:
- case TYPE1_JSR:
- return TYPE2_CALL;
-
- case TYPE1_JMP:
- return TYPE2_JMP;
-
- case TYPE1_LEA:
- return TYPE2_LEA;
-
- case TYPE1_CLR:
- case TYPE1_MOV3Q_L:
- case TYPE1_MOVE:
- case TYPE1_MOVEQ_L:
- case TYPE1_TST:
- switch (m68k_sched_cpu)
- {
- case CPU_CFV1:
- return TYPE2_OMOVE;
-
- case CPU_CFV2:
- case CPU_CFV3:
- return TYPE2_ALU;
-
- default:
- gcc_assert (get_attr_guess (insn) == GUESS_YES);
- return TYPE2_UNKNOWN;
- }
-
- case TYPE1_MUL_L:
- return TYPE2_MUL_L;
-
- case TYPE1_MUL_W:
- return TYPE2_MUL_W;
-
- case TYPE1_MOVE_L:
- case TYPE1_TST_L:
- return TYPE2_OMOVE;
-
- case TYPE1_PEA:
- return TYPE2_PEA;
-
- case TYPE1_RTS:
- return TYPE2_RTS;
-
- case TYPE1_UNLK:
- return TYPE2_UNLK;
-
- default:
- gcc_assert (get_attr_guess (insn) == GUESS_YES);
- return TYPE2_UNKNOWN;
- }
-}
+ /* Scale of indexed memory access within consumer.
+ Or zero if bypass should not be effective at the moment. */
+ int scale;
+} sched_cfv4_bypass_data;
/* An empty state that is used in m68k_sched_adjust_cost. */
static state_t sched_adjust_cost_state;
@@ -5112,13 +5204,33 @@ m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
|| recog_memoized (insn) < 0)
return cost;
+ if (sched_cfv4_bypass_data.scale == 1)
+ /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
+ {
+ /* haifa-sched.c: insn_cost () calls bypass_p () just before
+ targetm.sched.adjust_cost (). Hence, we can be relatively sure
+ that the data in sched_cfv4_bypass_data is up to date. */
+ gcc_assert (sched_cfv4_bypass_data.pro == def_insn
+ && sched_cfv4_bypass_data.con == insn);
+
+ if (cost < 3)
+ cost = 3;
+
+ sched_cfv4_bypass_data.pro = NULL;
+ sched_cfv4_bypass_data.con = NULL;
+ sched_cfv4_bypass_data.scale = 0;
+ }
+ else
+ gcc_assert (sched_cfv4_bypass_data.pro == NULL
+ && sched_cfv4_bypass_data.con == NULL
+ && sched_cfv4_bypass_data.scale == 0);
+
/* Don't try to issue INSN earlier than DFA permits.
This is especially useful for instructions that write to memory,
as their true dependence (default) latency is better to be set to 0
to workaround alias analysis limitations.
This is, in fact, a machine independent tweak, so, probably,
it should be moved to haifa-sched.c: insn_cost (). */
-
delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
if (delay > cost)
cost = delay;
@@ -5126,6 +5238,26 @@ m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
return cost;
}
+/* Return maximal number of insns that can be scheduled on a single cycle. */
+static int
+m68k_sched_issue_rate (void)
+{
+ switch (m68k_sched_cpu)
+ {
+ case CPU_CFV1:
+ case CPU_CFV2:
+ case CPU_CFV3:
+ return 1;
+
+ case CPU_CFV4:
+ return 2;
+
+ default:
+ gcc_unreachable ();
+ return 0;
+ }
+}
+
/* Maximal length of instruction for current CPU.
E.g. it is 3 for any ColdFire core. */
static int max_insn_size;
@@ -5133,6 +5265,9 @@ static int max_insn_size;
/* Data to model instruction buffer of CPU. */
struct _sched_ib
{
+ /* True if the instruction buffer is modeled for the current CPU. */
+ bool enabled_p;
+
/* Size of the instruction buffer in words. */
int size;
@@ -5175,17 +5310,17 @@ m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
{
int insn_size;
- if (recog_memoized (insn) >= 0)
+ if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
{
switch (m68k_sched_cpu)
{
case CPU_CFV1:
case CPU_CFV2:
- insn_size = get_attr_size (insn);
+ insn_size = sched_get_attr_size_int (insn);
break;
case CPU_CFV3:
- insn_size = get_attr_size (insn);
+ insn_size = sched_get_attr_size_int (insn);
/* ColdFire V3 and V4 cores have instruction buffers that can
accumulate up to 8 instructions regardless of instructions'
@@ -5215,6 +5350,11 @@ m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
break;
+ case CPU_CFV4:
+ gcc_assert (!sched_ib.enabled_p);
+ insn_size = 0;
+ break;
+
default:
gcc_unreachable ();
}
@@ -5233,194 +5373,12 @@ m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
return can_issue_more;
}
-/* Statistics gatherer. */
-
-typedef enum
- {
- /* Something needs to be done for this insn. */
- SCHED_DUMP_TODO,
-
- /* Support for this insn is complete. */
- SCHED_DUMP_DONE,
-
- /* This insn didn't require much effort to support it. */
- SCHED_DUMP_NOTHING
- } sched_dump_class_def;
-
-/* Pointer to functions that classifies insns into 3 above classes. */
-typedef sched_dump_class_def (*sched_dump_class_func_t) (rtx);
-
-/* Return statistical type of INSN regarding splits. */
-static sched_dump_class_def
-sched_dump_split_class (rtx insn)
-{
- int i;
-
- i = recog_memoized (insn);
- gcc_assert (i >= 0);
-
- switch (get_attr_split (insn))
- {
- case SPLIT_TODO:
- return SCHED_DUMP_TODO;
-
- case SPLIT_DONE:
- return SCHED_DUMP_DONE;
-
- case SPLIT_NOTHING:
- return SCHED_DUMP_NOTHING;
-
- default:
- gcc_unreachable ();
- }
-}
-
-/* ID of the guess unit. */
-static int sched_dump_dfa_guess_unit_code;
-
-/* DFA state for use in sched_dump_dfa_class (). */
-static state_t sched_dump_dfa_state;
-
-/* Return statistical type of INSN regarding DFA reservations. */
-static sched_dump_class_def
-sched_dump_dfa_class (rtx insn)
+/* Return how many instructions the scheduler should look ahead to choose
+   the best one. */
+static int
+m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
- int i;
-
- i = recog_memoized (insn);
- gcc_assert (i >= 0 && insn_has_dfa_reservation_p (insn));
-
- if (sched_dump_split_class (insn) == SCHED_DUMP_TODO)
- /* Insn is not yet ready for reservations. */
- return SCHED_DUMP_NOTHING;
-
- state_reset (sched_dump_dfa_state);
-
- if (state_transition (sched_dump_dfa_state, insn) >= 0)
- gcc_unreachable ();
-
- if (cpu_unit_reservation_p (sched_dump_dfa_state,
- sched_dump_dfa_guess_unit_code))
- return SCHED_DUMP_TODO;
-
- return SCHED_DUMP_DONE;
-}
-
-/* Dump statistics on current function into file DUMP_FILENAME and prefix
- each entry with PREFIX.
- Instructions are classified with DUMP_CLASS. */
-static void
-m68k_sched_dump (sched_dump_class_func_t dump_class,
- const char *prefix, FILE *dump)
-{
- sbitmap present;
- int *todos;
- int *dones;
- int *nothings;
- rtx insn;
-
- gcc_assert (dump != NULL);
-
- present = sbitmap_alloc (CODE_FOR_nothing);
- sbitmap_zero (present);
-
- todos = xcalloc (CODE_FOR_nothing, sizeof (*todos));
- dones = xcalloc (CODE_FOR_nothing, sizeof (*dones));
- nothings = xcalloc (CODE_FOR_nothing, sizeof (*nothings));
-
- /* Gather statistics. */
- for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
- {
- if (INSN_P (insn) && recog_memoized (insn) >= 0)
- {
- enum insn_code code;
-
- code = INSN_CODE (insn);
- gcc_assert (code < CODE_FOR_nothing);
-
- SET_BIT (present, code);
-
- switch (dump_class (insn))
- {
- case SCHED_DUMP_TODO:
- ++todos[code];
- break;
-
- case SCHED_DUMP_DONE:
- ++dones[code];
- break;
-
- case SCHED_DUMP_NOTHING:
- ++nothings[code];
- break;
- }
- }
- }
-
- /* Print statisctics. */
- {
- unsigned int i;
- sbitmap_iterator si;
- int total_todo;
- int total_done;
- int total_nothing;
-
- total_todo = 0;
- total_done = 0;
- total_nothing = 0;
-
- EXECUTE_IF_SET_IN_SBITMAP (present, 0, i, si)
- {
- int todo;
- int done;
- int nothing;
- enum insn_code code;
-
- code = (enum insn_code) i;
-
- todo = todos[code];
- done = dones[code];
- nothing = nothings[code];
-
- total_todo += todo;
- total_done += done;
- total_nothing += nothing;
-
- if (todo != 0)
- {
- fprintf (dump,
- "%s: %3d: %d / %d / %d ;",
- prefix, code, todo, done, nothing);
-
- {
- const char *name;
-
- name = get_insn_name (code);
-
- if (name != NULL)
- fprintf (dump, " {%s}\n", name);
- else
- fprintf (dump, " {unknown}\n");
- }
- }
- }
-
- gcc_assert (CODE_FOR_nothing < 999);
-
- fprintf (dump,
- "%s: 999: %d / %d / %d ; {total}\n",
- prefix, total_todo, total_done, total_nothing);
- }
-
- free (nothings);
- nothings = NULL;
- free (dones);
- dones = NULL;
- free (todos);
- todos = NULL;
-
- sbitmap_free (present);
- present = NULL;
+ return m68k_sched_issue_rate () - 1;
}
/* Implementation of targetm.sched.md_init_global () hook.
@@ -5446,25 +5404,43 @@ m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
}
}
- if (reload_completed && sched_verbose >= 8)
- /* Dump statistics. */
- {
- m68k_sched_dump (sched_dump_split_class, "m68k_sched_split",
- sched_dump);
+#ifdef ENABLE_CHECKING
+ /* Check that all instructions have DFA reservations and
+ that all instructions can be issued from a clean state. */
+ {
+ rtx insn;
+ state_t state;
- sched_dump_dfa_guess_unit_code = get_cpu_unit_code ("cf_guess");
- sched_dump_dfa_state = alloca (state_size ());
+ state = alloca (state_size ());
- m68k_sched_dump (sched_dump_dfa_class, "m68k_sched_dfa",
- sched_dump);
+ for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
+ {
+ if (INSN_P (insn) && recog_memoized (insn) >= 0)
+ {
+ gcc_assert (insn_has_dfa_reservation_p (insn));
- sched_dump_dfa_state = NULL;
- sched_dump_dfa_guess_unit_code = 0;
- }
+ state_reset (state);
+ if (state_transition (state, insn) >= 0)
+ gcc_unreachable ();
+ }
+ }
+ }
+#endif
/* Setup target cpu. */
+
+ /* ColdFire V4 has a set of features to keep its instruction buffer full
+ (e.g., a separate memory bus for instructions) and, hence, we do not model
+ buffer for this CPU. */
+ sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
+
switch (m68k_sched_cpu)
{
+ case CPU_CFV4:
+ sched_ib.filled = 0;
+
+ /* FALLTHRU */
+
case CPU_CFV1:
case CPU_CFV2:
max_insn_size = 3;
@@ -5538,13 +5514,19 @@ m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
sched_ib.records.adjust_index = 0;
break;
+ case CPU_CFV4:
+ gcc_assert (!sched_ib.enabled_p);
+ sched_ib.size = 0;
+ break;
+
default:
gcc_unreachable ();
}
- /* haifa-sched.c: schedule_block () calls advance_cycle () just before
- the first cycle. Workaround that. */
- sched_ib.filled = -2;
+ if (sched_ib.enabled_p)
+ /* haifa-sched.c: schedule_block () calls advance_cycle () just before
+ the first cycle. Workaround that. */
+ sched_ib.filled = -2;
}
/* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
@@ -5553,6 +5535,9 @@ m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
static void
m68k_sched_dfa_pre_advance_cycle (void)
{
+ if (!sched_ib.enabled_p)
+ return;
+
if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
{
sched_ib.filled += 2;
@@ -5572,6 +5557,9 @@ m68k_sched_dfa_post_advance_cycle (void)
{
int i;
+ if (!sched_ib.enabled_p)
+ return;
+
/* Setup number of prefetched instruction words in the instruction
buffer. */
i = max_insn_size - sched_ib.filled;
@@ -5582,3 +5570,164 @@ m68k_sched_dfa_post_advance_cycle (void)
gcc_unreachable ();
}
}
+
+/* Return X or Y (depending on OPX_P) operand of INSN,
+ if it is an integer register, or NULL otherwise. */
+static rtx
+sched_get_reg_operand (rtx insn, bool opx_p)
+{
+ rtx op = NULL;
+
+ if (opx_p)
+ {
+ if (get_attr_opx_type (insn) == OPX_TYPE_RN)
+ {
+ op = sched_get_operand (insn, true);
+ gcc_assert (op != NULL);
+
+ if (!reload_completed && !REG_P (op))
+ return NULL;
+ }
+ }
+ else
+ {
+ if (get_attr_opy_type (insn) == OPY_TYPE_RN)
+ {
+ op = sched_get_operand (insn, false);
+ gcc_assert (op != NULL);
+
+ if (!reload_completed && !REG_P (op))
+ return NULL;
+ }
+ }
+
+ return op;
+}
+
+/* Return true, if X or Y (depending on OPX_P) operand of INSN
+ is a MEM. */
+static bool
+sched_mem_operand_p (rtx insn, bool opx_p)
+{
+ switch (sched_get_opxy_mem_type (insn, opx_p))
+ {
+ case OP_TYPE_MEM1:
+ case OP_TYPE_MEM6:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/* Return the MEM operand of INSN that is read (if MUST_READ_P)
+   and/or written (if MUST_WRITE_P), or NULL otherwise. */
+static rtx
+sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
+{
+ bool opx_p;
+ bool opy_p;
+
+ opx_p = false;
+ opy_p = false;
+
+ if (must_read_p)
+ {
+ opx_p = true;
+ opy_p = true;
+ }
+
+ if (must_write_p)
+ {
+ opx_p = true;
+ opy_p = false;
+ }
+
+ if (opy_p && sched_mem_operand_p (insn, false))
+ return sched_get_operand (insn, false);
+
+ if (opx_p && sched_mem_operand_p (insn, true))
+ return sched_get_operand (insn, true);
+
+ gcc_unreachable ();
+ return NULL;
+}
+
+/* Return non-zero if PRO modifies register used as part of
+ address in CON. */
+int
+m68k_sched_address_bypass_p (rtx pro, rtx con)
+{
+ rtx pro_x;
+ rtx con_mem_read;
+
+ pro_x = sched_get_reg_operand (pro, true);
+ if (pro_x == NULL)
+ return 0;
+
+ con_mem_read = sched_get_mem_operand (con, true, false);
+ gcc_assert (con_mem_read != NULL);
+
+ if (reg_mentioned_p (pro_x, con_mem_read))
+ return 1;
+
+ return 0;
+}
+
+/* Helper function for m68k_sched_indexed_address_bypass_p.
+   If PRO modifies a register used as an index in CON,
+   return the scale of the indexed memory access in CON.  Return zero otherwise. */
+static int
+sched_get_indexed_address_scale (rtx pro, rtx con)
+{
+ rtx reg;
+ rtx mem;
+ struct m68k_address address;
+
+ reg = sched_get_reg_operand (pro, true);
+ if (reg == NULL)
+ return 0;
+
+ mem = sched_get_mem_operand (con, true, false);
+ gcc_assert (mem != NULL && MEM_P (mem));
+
+ if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
+ &address))
+ gcc_unreachable ();
+
+ if (REGNO (reg) == REGNO (address.index))
+ {
+ gcc_assert (address.scale != 0);
+ return address.scale;
+ }
+
+ return 0;
+}
+
+/* Return non-zero if PRO modifies register used
+ as index with scale 2 or 4 in CON. */
+int
+m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
+{
+ gcc_assert (sched_cfv4_bypass_data.pro == NULL
+ && sched_cfv4_bypass_data.con == NULL
+ && sched_cfv4_bypass_data.scale == 0);
+
+ switch (sched_get_indexed_address_scale (pro, con))
+ {
+ case 1:
+ /* We can't have a variable latency bypass, so
+ remember to adjust the insn cost in adjust_cost hook. */
+ sched_cfv4_bypass_data.pro = pro;
+ sched_cfv4_bypass_data.con = con;
+ sched_cfv4_bypass_data.scale = 1;
+ return 0;
+
+ case 2:
+ case 4:
+ return 1;
+
+ default:
+ return 0;
+ }
+}