author     Andrey Belevantsev <abel@ispras.ru>  2008-04-14 14:46:37 +0000
committer  Andrey Belevantsev <abel@ispras.ru>  2008-04-14 14:46:37 +0000
commit     d9640a05f3ad1f59fe9cbefdf3b563c38947f0b8 (patch)
tree       062c1b4c18feb6fedb22cb92e10f1d87b193992f
parent     63ae2f4b3e7a62df623dd24a5a13963d22b4d1d1 (diff)
* sel-sched.c (extract_new_fences_from): Move fence to the new ones
only if the succ insn has not yet been scheduled.
(find_best_expr): Tidy.
(remove_insns_for_debug, compute_av_set_on_boundaries,
find_sequential_best_exprs, move_nop_to_previous_block,
prepare_place_to_insert, move_exprs_to_boundary,
advance_state_on_fence, update_fence_and_insn,
update_boundaries, schedule_expr_on_boundary): Split from ...
(fill_insns): ... here.
(move_op_ascend): Do not propagate through NOPs.
(move_op_process_successors): Rename to code_motion_process_successors.
Fix assert.
(code_motion_path_driver): Change return type to int.
Return -1 when we have found the previously created bookkeeping.
(schedule_on_fences, find_min_max_seqno, calculate_new_fences,
update_seqnos_and_stage): Split from ...
(sel_sched_region_2): ... here.
* sched-deps.h: Inline into sched-int.h. Kill.
* sel-sched-dump.c (debug_vinsn, debug_vinsn_1, debug_av_set,
debug_lv_set, debug_ilist, debug_insn_vector,
debug_hard_reg_set): Fix '\n' printing.

git-svn-id: https://gcc.gnu.org/svn/gcc/branches/sel-sched-branch@134270 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--  gcc/ChangeLog.sel-sched     |   24
-rw-r--r--  gcc/Makefile.in             |    7
-rw-r--r--  gcc/config/ia64/ia64.c      |    1
-rw-r--r--  gcc/config/rs6000/rs6000.c  |    1
-rw-r--r--  gcc/ddg.c                   |    1
-rw-r--r--  gcc/haifa-sched.c           |    1
-rw-r--r--  gcc/modulo-sched.c          |    1
-rw-r--r--  gcc/sched-deps.c            |    1
-rw-r--r--  gcc/sched-ebb.c             |    1
-rw-r--r--  gcc/sched-int.h             |  183
-rw-r--r--  gcc/sched-rgn.c             |    1
-rw-r--r--  gcc/sel-sched-dump.c        |   10
-rw-r--r--  gcc/sel-sched-ir.c          |    1
-rw-r--r--  gcc/sel-sched.c             | 1316
14 files changed, 924 insertions, 625 deletions
diff --git a/gcc/ChangeLog.sel-sched b/gcc/ChangeLog.sel-sched
index b21feb1bf64..12fd7af29d2 100644
--- a/gcc/ChangeLog.sel-sched
+++ b/gcc/ChangeLog.sel-sched
@@ -1,5 +1,29 @@
2008-04-14 Andrey Belevantsev <abel@ispras.ru>
+ * sel-sched.c (extract_new_fences_from): Move fence to the new ones
+ only if the succ insn has not yet been scheduled.
+ (find_best_expr): Tidy.
+ (remove_insns_for_debug, compute_av_set_on_boundaries,
+ find_sequential_best_exprs, move_nop_to_previous_block,
+ prepare_place_to_insert, move_exprs_to_boundary,
+ advance_state_on_fence, update_fence_and_insn,
+ update_boundaries, schedule_expr_on_boundary): Split from ...
+ (fill_insns): ... here.
+ (move_op_ascend): Do not propagate through NOPs.
+ (move_op_process_successors): Rename to code_motion_process_successors.
+ Fix assert.
+ (code_motion_path_driver): Change return type to int.
+ Return -1 when we have found the previously created bookkeeping.
+ (schedule_on_fences, find_min_max_seqno, calculate_new_fences,
+ update_seqnos_and_stage): Split from ...
+ (sel_sched_region_2): ... here.
+ * sched-deps.h: Inline into sched-int.h. Kill.
+ * sel-sched-dump.c (debug_vinsn, debug_vinsn_1, debug_av_set,
+ debug_lv_set, debug_ilist, debug_insn_vector,
+ debug_hard_reg_set): Fix '\n' printing.
+
+2008-04-14 Andrey Belevantsev <abel@ispras.ru>
+
* sched-deps.c (sched_deps_init): Tidy.
* sel-sched-ir.c (init_fence_for_scheduling): New.
(flist_add): Use it.
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 813c50d0da2..003fbe430ec 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -796,7 +796,6 @@ REGS_H = regs.h varray.h $(MACHMODE_H) $(OBSTACK_H) $(BASIC_BLOCK_H) $(FUNCTION_
RA_H = ra.h $(REGS_H)
RESOURCE_H = resource.h hard-reg-set.h
SCHED_INT_H = sched-int.h $(INSN_ATTR_H) $(BASIC_BLOCK_H) $(RTL_H) $(DF_H) vecprim.h
-SCHED_DEPS_H = sched-deps.h $(SCHED_INT_H)
SEL_SCHED_IR_H = sel-sched-ir.h $(INSN_ATTR_H) $(BASIC_BLOCK_H) $(RTL_H) \
$(GGC_H) $(SCHED_INT_H) sched-rgn.h
SEL_SCHED_DUMP_H = sel-sched-dump.h $(SEL_SCHED_IR_H)
@@ -2802,7 +2801,7 @@ haifa-sched.o : haifa-sched.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_
sched-deps.o : sched-deps.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(RTL_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) insn-config.h \
$(FUNCTION_H) $(INSN_ATTR_H) toplev.h $(RECOG_H) except.h $(SCHED_INT_H) \
- sched-deps.h $(PARAMS_H) cselib.h $(TM_P_H) $(DF_H)
+ $(PARAMS_H) cselib.h $(TM_P_H) $(DF_H)
sched-rgn.o : sched-rgn.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(RTL_H) $(SCHED_INT_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) insn-config.h \
$(FUNCTION_H) $(INSN_ATTR_H) toplev.h $(RECOG_H) except.h $(PARAMS_H) \
@@ -2819,7 +2818,7 @@ sel-sched.o : sel-sched.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(RTL_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) insn-config.h \
$(FUNCTION_H) $(INSN_ATTR_H) toplev.h $(RECOG_H) except.h $(PARAMS_H) \
$(TM_P_H) $(TARGET_H) $(CFGLAYOUT_H) $(TIMEVAR_H) tree-pass.h sched-rgn.h \
- $(SCHED_INT_H) sched-deps.h $(GGC_H) $(TREE_H) $(LANGHOOKS_DEF_H) \
+ $(SCHED_INT_H) $(GGC_H) $(TREE_H) $(LANGHOOKS_DEF_H) \
$(SEL_SCHED_IR_H) $(SEL_SCHED_DUMP_H) sel-sched.h
sel-sched-dump.o : sel-sched-dump.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(RTL_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) insn-config.h \
@@ -2830,7 +2829,7 @@ sel-sched-ir.o : sel-sched-ir.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(RTL_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) insn-config.h \
$(FUNCTION_H) $(INSN_ATTR_H) toplev.h $(RECOG_H) except.h $(PARAMS_H) \
$(TM_P_H) $(TARGET_H) $(CFGLAYOUT_H) $(TIMEVAR_H) tree-pass.h sched-rgn.h \
- sched-deps.h $(GGC_H) $(TREE_H) $(LANGHOOKS_DEF_H) $(SEL_SCHED_IR_H)
+ $(SCHED_INT_H) $(GGC_H) $(TREE_H) $(LANGHOOKS_DEF_H) $(SEL_SCHED_IR_H)
final.o : final.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
$(TREE_H) $(FLAGS_H) intl.h $(REGS_H) $(RECOG_H) conditions.h \
insn-config.h $(INSN_ATTR_H) $(FUNCTION_H) output.h hard-reg-set.h \
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 49508f93361..0d2f929ff67 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -43,7 +43,6 @@ along with GCC; see the file COPYING3. If not see
#include "basic-block.h"
#include "toplev.h"
#include "sched-int.h"
-#include "sched-deps.h"
#include "timevar.h"
#include "target.h"
#include "target-def.h"
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 9ade6437f40..31944cc8c37 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -52,7 +52,6 @@
#include "reload.h"
#include "cfglayout.h"
#include "sched-int.h"
-#include "sched-deps.h"
#include "tree-gimple.h"
#include "tree-flow.h"
#include "intl.h"
diff --git a/gcc/ddg.c b/gcc/ddg.c
index 2a9b2e382ee..14b18745823 100644
--- a/gcc/ddg.c
+++ b/gcc/ddg.c
@@ -36,7 +36,6 @@ along with GCC; see the file COPYING3. If not see
#include "except.h"
#include "recog.h"
#include "sched-int.h"
-#include "sched-deps.h"
#include "target.h"
#include "cfglayout.h"
#include "cfgloop.h"
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index 6a3cf1ce862..160af4afd2a 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -140,7 +140,6 @@ along with GCC; see the file COPYING3. If not see
#include "toplev.h"
#include "recog.h"
#include "sched-int.h"
-#include "sched-deps.h"
#include "target.h"
#include "output.h"
#include "params.h"
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index 0bf1dd51e0d..1d0ae2c57c7 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -37,7 +37,6 @@ along with GCC; see the file COPYING3. If not see
#include "toplev.h"
#include "recog.h"
#include "sched-int.h"
-#include "sched-deps.h"
#include "target.h"
#include "cfglayout.h"
#include "cfgloop.h"
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 557050ff2bc..854bcbfe2a7 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -39,7 +39,6 @@ along with GCC; see the file COPYING3. If not see
#include "toplev.h"
#include "recog.h"
#include "sched-int.h"
-#include "sched-deps.h"
#include "params.h"
#include "cselib.h"
diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c
index 57c6cb8d181..da5e76900fa 100644
--- a/gcc/sched-ebb.c
+++ b/gcc/sched-ebb.c
@@ -40,7 +40,6 @@ along with GCC; see the file COPYING3. If not see
#include "cfglayout.h"
#include "params.h"
#include "sched-int.h"
-#include "sched-deps.h"
#include "target.h"
#include "output.h"
diff --git a/gcc/sched-int.h b/gcc/sched-int.h
index 4798ef73893..49c8a2cfb85 100644
--- a/gcc/sched-int.h
+++ b/gcc/sched-int.h
@@ -638,6 +638,45 @@ extern struct haifa_sched_info *current_sched_info;
/* Indexed by INSN_UID, the collection of all data associated with
a single instruction. */
+struct _haifa_deps_insn_data
+{
+ /* The number of incoming edges in the forward dependency graph.
+ As scheduling proceeds, counts are decreased. An insn moves to
+ the ready queue when its counter reaches zero. */
+ int dep_count;
+
+ /* Nonzero if the instruction has an internal dependence
+ (e.g. add_dependence was invoked with (insn == elem)). */
+ unsigned int has_internal_dep;
+
+ /* NB: We can't place 'struct _deps_list' here instead of deps_list_t into
+ h_i_d because when h_i_d extends, addresses of the deps_list->first
+ change without updating deps_list->first->next->prev_nextp. Thus
+ BACK_DEPS and RESOLVED_BACK_DEPS are allocated on the heap and FORW_DEPS
+ list is allocated on the obstack. */
+
+ /* A list of hard backward dependencies. The insn is a consumer of all the
+ deps mentioned here. */
+ deps_list_t hard_back_deps;
+
+ /* A list of speculative (weak) dependencies. The insn is a consumer of all
+ the deps mentioned here. */
+ deps_list_t spec_back_deps;
+
+ /* A list of insns which depend on the instruction. Unlike 'back_deps',
+ it represents forward dependencies. */
+ deps_list_t forw_deps;
+
+ /* A list of scheduled producers of the instruction. Links are being moved
+ from 'back_deps' to 'resolved_back_deps' while scheduling. */
+ deps_list_t resolved_back_deps;
+
+ /* A list of scheduled consumers of the instruction. Links are being moved
+ from 'forw_deps' to 'resolved_forw_deps' while scheduling to speed up
+ the search in 'forw_deps'. */
+ deps_list_t resolved_forw_deps;
+};
+
struct _haifa_insn_data
{
/* We can't place 'struct _deps_list' into h_i_d instead of deps_list_t
@@ -712,6 +751,24 @@ extern VEC(haifa_insn_data_def, heap) *h_i_d;
#define INSN_REG_WEIGHT(INSN) (HID (INSN)->reg_weight)
#define INSN_PRIORITY_STATUS(INSN) (HID (INSN)->priority_status)
+typedef struct _haifa_deps_insn_data haifa_deps_insn_data_def;
+typedef haifa_deps_insn_data_def *haifa_deps_insn_data_t;
+
+DEF_VEC_O (haifa_deps_insn_data_def);
+DEF_VEC_ALLOC_O (haifa_deps_insn_data_def, heap);
+
+extern VEC(haifa_deps_insn_data_def, heap) *h_d_i_d;
+
+#define HDID(INSN) (VEC_index (haifa_deps_insn_data_def, h_d_i_d, \
+ INSN_LUID (INSN)))
+#define INSN_DEP_COUNT(INSN) (HDID (INSN)->dep_count)
+#define HAS_INTERNAL_DEP(INSN) (HDID (INSN)->has_internal_dep)
+#define INSN_FORW_DEPS(INSN) (HDID (INSN)->forw_deps)
+#define INSN_RESOLVED_BACK_DEPS(INSN) (HDID (INSN)->resolved_back_deps)
+#define INSN_RESOLVED_FORW_DEPS(INSN) (HDID (INSN)->resolved_forw_deps)
+#define INSN_HARD_BACK_DEPS(INSN) (HDID (INSN)->hard_back_deps)
+#define INSN_SPEC_BACK_DEPS(INSN) (HDID (INSN)->spec_back_deps)
+
/*#define INSN_HARD_BACK_DEPS(INSN) (HID (INSN)->hard_back_deps)
#define INSN_SPEC_BACK_DEPS(INSN) (HID (INSN)->spec_back_deps)
#define INSN_FORW_DEPS(INSN) (HID (INSN)->forw_deps)
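
The dep_count comment above is the classic ready-list discipline, and the
HDID accessors fetch per-insn data from a vector indexed by luid. As a
minimal standalone sketch of that pattern (plain C arrays stand in for
GCC's VEC machinery; mark_ready is a hypothetical placeholder):

    #include <stdlib.h>

    struct deps_data_sketch { int dep_count; };
    static struct deps_data_sketch *d;  /* indexed by luid, like h_d_i_d */

    static void mark_ready (int luid) { (void) luid; }

    /* Called for each scheduled producer of CONSUMER_LUID: the consumer
       moves to the ready queue once its counter reaches zero.  */
    static void
    producer_scheduled (int consumer_luid)
    {
      if (--d[consumer_luid].dep_count == 0)
        mark_ready (consumer_luid);
    }

    int
    main (void)
    {
      d = calloc (2, sizeof (*d));
      d[1].dep_count = 2;
      producer_scheduled (1);  /* one unscheduled producer remains */
      producer_scheduled (1);  /* now ready */
      free (d);
      return 0;
    }
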
@@ -978,7 +1035,133 @@ enum INSN_TRAP_CLASS
#define HAIFA_INLINE __inline
#endif
+struct _deps_insn_data
+{
+ /* Some insns (e.g. call) are not allowed to move across blocks. */
+ unsigned int cant_move : 1;
+};
+
+typedef struct _deps_insn_data deps_insn_data_def;
+typedef deps_insn_data_def *deps_insn_data_t;
+
+DEF_VEC_O (deps_insn_data_def);
+DEF_VEC_ALLOC_O (deps_insn_data_def, heap);
+
+extern VEC (deps_insn_data_def, heap) *d_i_d;
+
+#define DID(INSN) (VEC_index (deps_insn_data_def, d_i_d, INSN_LUID (INSN)))
+#define CANT_MOVE(INSN) (DID (INSN)->cant_move)
+
+struct sched_deps_info_def
+{
+ /* Called when computing dependencies for a JUMP_INSN. This function
+ should store the set of registers that must be considered as set by
+ the jump in the regset. */
+ void (*compute_jump_reg_dependencies) (rtx, regset, regset, regset);
+
+ /* Start analyzing insn. */
+ void (*start_insn) (rtx);
+
+ /* Finish analyzing insn. */
+ void (*finish_insn) (void);
+
+ /* Start analyzing insn subexpression. */
+ void (*start_x) (rtx);
+
+ /* Finish analyzing insn subexpression. */
+ void (*finish_x) (void);
+
+ /* Start analyzing insn LHS (Left Hand Side). */
+ void (*start_lhs) (rtx);
+
+ /* Finish analyzing insn LHS. */
+ void (*finish_lhs) (void);
+
+ /* Start analyzing insn RHS (Right Hand Side). */
+ void (*start_rhs) (rtx);
+
+ /* Finish analyzing insn RHS. */
+ void (*finish_rhs) (void);
+
+ /* Note set of the register. */
+ void (*note_reg_set) (int);
+
+ /* Note clobber of the register. */
+ void (*note_reg_clobber) (int);
+
+ /* Note use of the register. */
+ void (*note_reg_use) (int);
+
+ /* Note memory dependence of type DS between MEM1 and MEM2 (which is
+ in the INSN2). */
+ void (*note_mem_dep) (rtx mem1, rtx mem2, rtx insn2, ds_t ds);
+
+ /* Note a dependence of type DS from the INSN. */
+ void (*note_dep) (rtx insn, ds_t ds);
+
+ /* Nonzero if we should use cselib for better alias analysis. This
+ must be 0 if the dependency information is used after sched_analyze
+ has completed, e.g. if we're using it to initialize state for successor
+ blocks in region scheduling. */
+ unsigned int use_cselib : 1;
+
+ /* If set, generate links between instructions as DEPS_LIST.
+ Otherwise, generate usual INSN_LIST links. */
+ unsigned int use_deps_list : 1;
+
+ /* Generate data and control speculative dependencies.
+ Requires USE_DEPS_LIST set. */
+ unsigned int generate_spec_deps : 1;
+};
+
+extern struct sched_deps_info_def *sched_deps_info;
+
+
/* Functions in sched-deps.c. */
+extern bool sched_insns_conditions_mutex_p (const_rtx, const_rtx);
+extern void add_dependence (rtx, rtx, enum reg_note);
+extern void sched_analyze (struct deps *, rtx, rtx);
+extern void init_deps (struct deps *);
+extern void free_deps (struct deps *);
+extern void init_deps_global (void);
+extern void finish_deps_global (void);
+extern void deps_analyze_insn (struct deps *, rtx);
+extern void remove_from_deps (struct deps *, rtx);
+extern void add_forw_dep (dep_link_t);
+extern void compute_forward_dependences (rtx, rtx);
+extern enum DEPS_ADJUST_RESULT add_or_update_back_dep (rtx, rtx,
+ enum reg_note, ds_t);
+extern void add_or_update_back_forw_dep (rtx, rtx, enum reg_note, ds_t);
+extern void add_back_forw_dep (rtx, rtx, enum reg_note, ds_t);
+extern void delete_back_forw_dep (dep_link_t);
+extern dw_t get_dep_weak_1 (ds_t, ds_t);
+extern dw_t get_dep_weak (ds_t, ds_t);
+extern ds_t set_dep_weak (ds_t, ds_t, dw_t);
+extern dw_t estimate_dep_weak (rtx, rtx);
+extern ds_t ds_merge (ds_t, ds_t);
+extern ds_t ds_full_merge (ds_t, ds_t, rtx, rtx);
+extern ds_t ds_max_merge (ds_t, ds_t);
+extern dw_t ds_weak (ds_t);
+extern ds_t ds_get_speculation_types (ds_t);
+extern ds_t ds_get_max_dep_weak (ds_t);
+
+extern void deps_finish_d_i_d (void);
+
+extern void sched_deps_init (bool);
+extern void sched_deps_finish (void);
+extern void sched_deps_local_finish (void);
+
+extern void haifa_note_reg_set (int);
+extern void haifa_note_reg_clobber (int);
+extern void haifa_note_reg_use (int);
+
+extern void deps_start_bb (struct deps *, rtx);
+extern enum reg_note ds_to_dt (ds_t);
+
+extern bool deps_pools_are_empty_p (void);
+extern void sched_free_deps (rtx, rtx, bool);
+extern void extend_dependency_caches (int, bool);
+
extern void debug_ds (ds_t);
/* Functions in haifa-sched.c. */
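
As an illustration of the callback interface above, a minimal client
could be wired up as in the following sketch (the toy_* names are
hypothetical, not from the patch): unused analysis hooks stay NULL, and
the cleared bitfields request plain INSN_LIST links with no cselib and no
speculation. A real client would then point sched_deps_info at this
structure before invoking sched_analyze.

    static void toy_note_reg_set (int regno)     { (void) regno; }
    static void toy_note_reg_clobber (int regno) { (void) regno; }
    static void toy_note_reg_use (int regno)     { (void) regno; }

    static struct sched_deps_info_def toy_sched_deps_info =
    {
      NULL,                /* compute_jump_reg_dependencies */
      NULL, NULL,          /* start_insn, finish_insn */
      NULL, NULL,          /* start_x, finish_x */
      NULL, NULL,          /* start_lhs, finish_lhs */
      NULL, NULL,          /* start_rhs, finish_rhs */
      toy_note_reg_set, toy_note_reg_clobber, toy_note_reg_use,
      NULL,                /* note_mem_dep */
      NULL,                /* note_dep */
      0, 0, 0              /* use_cselib, use_deps_list, generate_spec_deps */
    };
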
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index 0e18b2847b5..1fcfa130428 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -64,7 +64,6 @@ along with GCC; see the file COPYING3. If not see
#include "cfglayout.h"
#include "params.h"
#include "sched-int.h"
-#include "sched-deps.h"
#include "cselib.h"
#include "target.h"
#include "timevar.h"
diff --git a/gcc/sel-sched-dump.c b/gcc/sel-sched-dump.c
index dddeac3d425..5eb10cb0002 100644
--- a/gcc/sel-sched-dump.c
+++ b/gcc/sel-sched-dump.c
@@ -204,6 +204,8 @@ dump_vinsn_1 (vinsn_t vi, int flags)
if (cost != -1)
sel_print ("cost:%d;", cost);
}
+
+ sel_print (")");
}
void
@@ -217,7 +219,7 @@ debug_vinsn (vinsn_t vi)
{
switch_dump ();
dump_vinsn_1 (vi, debug_vinsn_flags);
- sel_print (")\n");
+ sel_print ("\n");
switch_dump ();
}
@@ -932,6 +934,7 @@ debug_av_set (av_set_t av)
{
switch_dump ();
dump_av_set (av);
+ sel_print ("\n");
switch_dump ();
}
@@ -941,6 +944,7 @@ debug_lv_set (regset lv)
{
switch_dump ();
dump_lv_set (lv);
+ sel_print ("\n");
switch_dump ();
}
@@ -950,6 +954,7 @@ debug_ilist (ilist_t p)
{
switch_dump ();
dump_ilist (p);
+ sel_print ("\n");
switch_dump ();
}
@@ -959,6 +964,7 @@ debug_blist (blist_t bnds)
{
switch_dump ();
dump_blist (bnds);
+ sel_print ("\n");
switch_dump ();
}
@@ -968,6 +974,7 @@ debug_insn_vector (rtx_vec_t succs)
{
switch_dump ();
dump_insn_vector (succs);
+ sel_print ("\n");
switch_dump ();
}
@@ -977,6 +984,7 @@ debug_hard_reg_set (HARD_REG_SET set)
{
switch_dump ();
dump_hard_reg_set ("", set);
+ sel_print ("\n");
switch_dump ();
}
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index 578ba745f99..3d24dfa0d94 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -43,7 +43,6 @@
#include "tree-pass.h"
#include "sched-rgn.h"
#include "sched-int.h"
-#include "sched-deps.h"
#include "cselib.h"
#include "ggc.h"
#include "tree.h"
diff --git a/gcc/sel-sched.c b/gcc/sel-sched.c
index 8eb7cf3a816..1ca0cc320a3 100644
--- a/gcc/sel-sched.c
+++ b/gcc/sel-sched.c
@@ -44,7 +44,6 @@
#include "tree-pass.h"
#include "sched-rgn.h"
#include "sched-int.h"
-#include "sched-deps.h"
#include "cselib.h"
#include "ggc.h"
#include "tree.h"
@@ -363,8 +362,8 @@ static basic_block generate_bookkeeping_insn (expr_t, insn_t, edge, edge);
static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *,
def_list_t *);
static bool move_op (insn_t, av_set_t, rtx, expr_t);
-static bool code_motion_path_driver (insn_t, av_set_t, ilist_t,
- cmpd_local_params_p, void *);
+static int code_motion_path_driver (insn_t, av_set_t, ilist_t,
+ cmpd_local_params_p, void *);
static void sel_sched_region_1 (void);
static void sel_sched_region_2 (sel_sched_region_2_data_t);
static av_set_t compute_av_set_inside_bb (insn_t, ilist_t, int, bool);
@@ -451,8 +450,19 @@ extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences,
if (single_succ_p (bb)
&& single_pred_p (single_succ (bb)))
{
- FENCE_INSN (fence) = sel_bb_head (single_succ (bb));
- move_fence_to_fences (old_fences, new_fences);
+ insn_t succ = sel_bb_head (single_succ (bb));
+
+ if (INSN_SEQNO (succ) > 0
+ && INSN_SEQNO (succ) <= orig_max_seqno
+ && INSN_SCHED_TIMES (succ) <= 0)
+ {
+ FENCE_INSN (fence) = succ;
+ move_fence_to_fences (old_fences, new_fences);
+
+ if (sched_verbose >= 1)
+ sel_print ("Fence %d continues as %d[%d] (state continue)\n",
+ INSN_UID (insn), INSN_UID (succ), BLOCK_NUM (succ));
+ }
return;
}
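
The new guard is a three-part predicate: the successor must still belong
to the current pass's original insns (positive seqno not above
orig_max_seqno) and must not have been scheduled yet. Factored out as a
standalone sketch (fence_can_continue_p is a hypothetical name):

    static int
    fence_can_continue_p (int succ_seqno, int orig_max_seqno,
                          int sched_times)
    {
      /* The fence continues into the lone successor only when that insn
         is an original insn of this pass that was never scheduled.  */
      return succ_seqno > 0
             && succ_seqno <= orig_max_seqno
             && sched_times <= 0;
    }
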
@@ -2817,7 +2827,7 @@ find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs,
{
def_list_iterator i;
def_t def;
- bool res;
+ int res;
bool needs_spec_check_p = false;
expr_t expr;
av_set_iterator expr_iter;
@@ -2837,7 +2847,7 @@ find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs,
res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
- gcc_assert (res);
+ gcc_assert (res == 1);
gcc_assert (original_insns && *original_insns);
/* ??? We calculate whether an expression needs a check when computing
@@ -3752,32 +3762,56 @@ find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence)
if (best == NULL && ready.n_ready > 0)
{
- int can_issue, privileged_n, index, avail_n;
+ int privileged_n, index, avail_n;
can_issue_more = invoke_reorder_hooks (fence);
+ if (can_issue_more > 0)
+ {
+ /* Try choosing the best insn until we find one that can be
+ scheduled given the liveness restrictions on its destination register.
+ In the future, we'd like to choose once and then just probe insns
+ in the order of their priority. */
+ avail_n = invoke_dfa_lookahead_guard ();
+ privileged_n = calculate_privileged_insns ();
+ can_issue_more = choose_best_insn (fence, privileged_n, &index);
+ if (can_issue_more)
+ {
+ best = find_expr_for_ready (index, true);
+ if (EXPR_WAS_RENAMED (best))
+ EXPR_WAS_RENAMED (best) = 0;
+ }
+ }
+ /* We had some available insns, so if we can't issue them,
+ we have a stall. */
if (can_issue_more == 0)
- return NULL;
-
- /* Try choosing the best insn until we find one that is could be
- scheduled due to liveness restrictions on its destination register.
- In the future, we'd like to choose once and then just probe insns
- in the order of their priority. */
- avail_n = invoke_dfa_lookahead_guard ();
- privileged_n = calculate_privileged_insns ();
- can_issue = choose_best_insn (fence, privileged_n, &index);
- if (can_issue)
{
- best = find_expr_for_ready (index, true);
- if (EXPR_WAS_RENAMED (best))
- EXPR_WAS_RENAMED (best) = 0;
+ best = NULL;
+ need_stall = 1;
}
- else
- need_stall = 1;
}
if (best != NULL)
+ {
can_issue_more = invoke_aftermath_hooks (fence, EXPR_INSN_RTX (best),
can_issue_more);
+ if (can_issue_more == 0)
+ {
+ need_stall = 1;
+ best = NULL;
+ }
+ }
+
+ if (sched_verbose >= 2)
+ {
+ if (best != NULL)
+ {
+ sel_print ("Best expression (vliw form): ");
+ dump_expr (best);
+ sel_print ("; cycle %d\n", FENCE_CYCLE (fence));
+ }
+ else
+ sel_print ("No best expr found!\n");
+ }
return best;
}
@@ -4202,7 +4236,7 @@ remove_temp_moveop_nops (void)
{
int i;
insn_t insn;
-
+
for (i = 0; VEC_iterate (insn_t, vec_temp_moveop_nops, i, insn); i++)
{
gcc_assert (INSN_NOP_P (insn));
@@ -4213,521 +4247,547 @@ remove_temp_moveop_nops (void)
if (VEC_length (insn_t, vec_temp_moveop_nops) > 0)
VEC_block_remove (insn_t, vec_temp_moveop_nops, 0,
VEC_length (insn_t, vec_temp_moveop_nops));
-
}
-/* Records the number of fill_insns runs for debugging purposes. */
-static int fill_insns_run = 0;
-
/* Records the maximal UID before moving up an instruction. Used for
distinguishing between bookkeeping copies and original insns. */
static int max_uid_before_move_op = 0;
-/* Gather a parallel group of insns at FENCE and assign their seqno
- to SEQNO. All scheduled insns are gathered in SCHEDULED_INSNS_TAILPP
- list for later recalculation of seqnos. */
+#ifdef ENABLE_CHECKING
+/* Records the number of fill_insns runs for debugging purposes. */
+static int fill_insns_run = 0;
+
static void
-fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
+remove_insns_for_debug (blist_t bnds, av_set_t *av_vliw_p)
{
- blist_t bnds = NULL, *bnds_tailp;
- av_set_t av_vliw = NULL;
- insn_t insn = FENCE_INSN (fence);
- state_t temp_state = alloca (dfa_state_size);
+ int now;
+ int start;
+ int stop;
+ bool do_p;
- if (sched_verbose >= 2)
- sel_print ("\nStarting fill_insns for insn %d, cycle %d\n",
- INSN_UID (insn), FENCE_CYCLE (fence));
+ sel_dump_cfg ("after-compute_av");
- blist_add (&bnds, insn, NULL, FENCE_DC (fence));
- bnds_tailp = &BLIST_NEXT (bnds);
- set_target_context (FENCE_TC (fence));
- target_bb = INSN_BB (insn);
+ now = ++fill_insns_run;
+ start = PARAM_VALUE (PARAM_INSN_START);
+ stop = PARAM_VALUE (PARAM_INSN_STOP);
+ do_p = (PARAM_VALUE (PARAM_INSN_P) == 1);
- /* Do while we can add any operation to the current group. */
- do
- {
- blist_t bnds1, *bnds_tailp1, *bndsp, bnds_tail1;
- expr_t expr_vliw;
+ if (do_p)
+ do_p = (start <= now) && (now <= stop);
+ else
+ do_p = (start > now) || (now > stop);
- if (sched_verbose >= 2)
- {
- sel_print ("Boundaries: ");
- dump_blist (bnds);
- sel_print ("\n");
- }
+ /* If more advanced --param insn-range was specified, use only it. */
+ if (flag_insn_range)
+ {
+ bool err = false;
- /* Compute the set of available expressions. */
- bnds1 = bnds;
- do
- {
- bnd_t bnd = BLIST_BND (bnds1);
- av_set_t av1_copy;
- insn_t new_bnd_to = BND_TO (bnd);
-
- /* Rewind BND->TO to the basic block header in case some bookkeeping
- instructions were inserted before BND->TO and it needs to be
- adjusted. */
- while (!NOTE_INSN_BASIC_BLOCK_P (PREV_INSN (new_bnd_to)))
- {
- new_bnd_to = PREV_INSN (new_bnd_to);
+ do_p = in_range_p (now, flag_insn_range, &err);
+ /* An error may be caused by an invalid expression. Note that a
+ valid expression must not contain any spaces. */
+ gcc_assert (!err);
+ }
- /* Assert that this can only happen with unscheduled code. */
- gcc_assert (INSN_SCHED_TIMES (new_bnd_to) == 0);
- }
- BND_TO (bnd) = new_bnd_to;
+ if (!do_p)
+ /* Leave only the next insn in av_vliw. */
+ {
+ av_set_iterator av_it;
+ expr_t expr;
+ bnd_t bnd = BLIST_BND (bnds);
+ insn_t next = BND_TO (bnd);
- av_set_clear (&BND_AV (bnd));
- BND_AV (bnd) = compute_av_set (BND_TO (bnd), NULL, 0, true);
+ gcc_assert (BLIST_NEXT (bnds) == NULL);
- av_set_clear (&BND_AV1 (bnd));
- BND_AV1 (bnd) = av_set_copy (BND_AV (bnd));
+ FOR_EACH_EXPR_1 (expr, av_it, av_vliw_p)
+ if (EXPR_INSN_RTX (expr) != next)
+ av_set_iter_remove (&av_it);
+ }
+}
+#endif
- moveup_set_path (&BND_AV1 (bnd), BND_PTR (bnd));
+/* Compute available instructions on boundaries. */
+static void
+compute_av_set_on_boundaries (blist_t bnds, av_set_t *av_vliw_p)
+{
+ if (sched_verbose >= 2)
+ {
+ sel_print ("Boundaries: ");
+ dump_blist (bnds);
+ sel_print ("\n");
+ }
- av1_copy = av_set_copy (BND_AV1 (bnd));
- av_set_union_and_clear (&av_vliw, &av1_copy, NULL);
+ while (bnds)
+ {
+ bnd_t bnd = BLIST_BND (bnds);
+ av_set_t av1_copy;
+ insn_t bnd_to = BND_TO (bnd);
+
+ /* Rewind BND->TO to the basic block header in case some bookkeeping
+ instructions were inserted before BND->TO and it needs to be
+ adjusted. */
+ while (! sel_bb_head_p (bnd_to))
+ {
+ bnd_to = PREV_INSN (bnd_to);
+ gcc_assert (INSN_SCHED_TIMES (bnd_to) == 0);
}
- while ((bnds1 = BLIST_NEXT (bnds1)));
+ BND_TO (bnd) = bnd_to;
- remove_insns_that_need_bookkeeping (fence, &av_vliw);
-
- /* If debug parameters tell us to ignore this attempt to move an insn,
- obey. */
- {
- int now;
- int start;
- int stop;
- bool do_p;
+ av_set_clear (&BND_AV (bnd));
+ BND_AV (bnd) = compute_av_set (BND_TO (bnd), NULL, 0, true);
- sel_dump_cfg ("after-compute_av");
+ av_set_clear (&BND_AV1 (bnd));
+ BND_AV1 (bnd) = av_set_copy (BND_AV (bnd));
- now = ++fill_insns_run;
- start = PARAM_VALUE (PARAM_INSN_START);
- stop = PARAM_VALUE (PARAM_INSN_STOP);
- do_p = (PARAM_VALUE (PARAM_INSN_P) == 1);
+ moveup_set_path (&BND_AV1 (bnd), BND_PTR (bnd));
- if (do_p)
- do_p = (start <= now) && (now <= stop);
- else
- do_p = (start > now) || (now > stop);
+ av1_copy = av_set_copy (BND_AV1 (bnd));
+ av_set_union_and_clear (av_vliw_p, &av1_copy, NULL);
- /* If more advanced --param insn-range was specified, use only it. */
- if (flag_insn_range)
- {
- bool err = false;
-
- do_p = in_range_p (now, flag_insn_range, &err);
- /* Error may be caused by invalid expression. Note that the
- valid expression shouldn't contain any spaces. */
- gcc_assert (!err);
- }
-
- if (!do_p)
- /* Leave only the next insn in av_vliw. */
- {
- av_set_iterator av_it;
- expr_t expr;
- bnd_t bnd = BLIST_BND (bnds);
- insn_t next = BND_TO (bnd);
-
- gcc_assert (BLIST_NEXT (bnds) == NULL);
-
- FOR_EACH_EXPR_1 (expr, av_it, &av_vliw)
- if (EXPR_INSN_RTX (expr) != next)
- av_set_iter_remove (&av_it);
- }
- }
+ bnds = BLIST_NEXT (bnds);
+ }
- /* Now we've computed AV_VLIW - the set of expressions that can be
- scheduled on FENCE. */
+ if (sched_verbose >= 2)
+ {
+ sel_print ("Available exprs (vliw form): ");
+ dump_av_set (*av_vliw_p);
+ sel_print ("\n");
+ }
+}
- if (sched_verbose >= 2)
+/* Calculate the sequential av set corresponding to the EXPR_VLIW
+ expression. */
+static av_set_t
+find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw)
+{
+ av_set_t expr_seq = NULL;
+ bool first_p = true;
+ expr_t expr;
+ av_set_iterator i;
+
+ FOR_EACH_EXPR (expr, i, BND_AV (bnd))
+ {
+ ilist_t root = BND_PTR (bnd);
+
+ if (equal_after_moveup_path_p (expr, root, expr_vliw))
{
- sel_print ("Available exprs (vliw form): ");
- dump_av_set (av_vliw);
- sel_print ("\n");
+ gcc_assert (first_p);
+ first_p = false;
+
+ /* The sequential expression has the right form to pass
+ to move_op except when renaming happened. Put the
+ correct register in EXPR then. */
+ if (EXPR_SEPARABLE_P (expr) && REG_P (EXPR_LHS (expr))
+ && expr_dest_regno (expr) != expr_dest_regno (expr_vliw))
+ {
+ replace_dest_with_reg_in_expr (expr, EXPR_LHS (expr_vliw));
+ stat_renamed_scheduled++;
+ }
+
+ av_set_add (&expr_seq, expr);
+ if (EXPR_WAS_SUBSTITUTED (expr))
+ stat_substitutions_total++;
}
+ }
- if (av_vliw == NULL)
- break;
+ if (sched_verbose >= 2)
+ {
+ sel_print ("Best expression(s) (sequential form): ");
+ dump_av_set (expr_seq);
+ sel_print ("\n");
+ }
+
+ return expr_seq;
+}
- /* Choose the best expression and, if needed, destination register
- for it. */
- expr_vliw = find_best_expr (&av_vliw, bnds, fence);
- if (!expr_vliw)
- {
- if (sched_verbose >= 2)
- sel_print ("No best expr found!\n");
+/* Move nop to previous block. */
+static void
+move_nop_to_previous_block (insn_t nop, basic_block prev_bb)
+{
+ insn_t prev_insn, next_insn, note;
- /* Reorder* hooks told us nothing more to schedule; indicate that
- a stall is needed. */
- if (can_issue_more == 0)
- need_stall = 1;
+ gcc_assert (sel_bb_head_p (nop)
+ && prev_bb == BLOCK_FOR_INSN (nop)->prev_bb);
+ note = bb_note (BLOCK_FOR_INSN (nop));
+ prev_insn = sel_bb_end (prev_bb);
+ next_insn = NEXT_INSN (nop);
+ gcc_assert (prev_insn != NULL_RTX
+ && PREV_INSN (note) == prev_insn);
- av_set_clear (&av_vliw);
- break;
- }
- if (sched_verbose >= 2)
- {
- sel_print ("Best expression (vliw form): ");
- dump_expr (expr_vliw);
- sel_print ("; cycle %d\n", FENCE_CYCLE (fence));
- }
-
- bndsp = &bnds;
- bnds_tailp1 = bnds_tailp;
- do
- /* !!! This code is guaranteed to execute only once. */
- {
- bnd_t bnd = BLIST_BND (*bndsp);
- av_set_t expr_seq = NULL;
- expr_t expr;
- av_set_iterator i;
- succ_iterator si;
- insn_t succ;
+ NEXT_INSN (prev_insn) = nop;
+ PREV_INSN (nop) = prev_insn;
- insn_t place_to_insert;
- int n_bookkeeping_copies_before_moveop;
- bool asm_p;
- bool first_p = true;
+ PREV_INSN (note) = nop;
+ NEXT_INSN (note) = next_insn;
- if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr_vliw)))
- {
- bndsp = &BLIST_NEXT (*bndsp);
- bnds_tail1 = *bnds_tailp1;
- continue;
- }
+ NEXT_INSN (nop) = note;
+ PREV_INSN (next_insn) = note;
- FOR_EACH_EXPR (expr, i, BND_AV (bnd))
- {
- ilist_t root = BND_PTR (bnd);
+ BB_END (prev_bb) = nop;
+ BLOCK_FOR_INSN (nop) = prev_bb;
+}
- if (equal_after_moveup_path_p (expr, root, expr_vliw))
- {
- gcc_assert (first_p);
- first_p = false;
-
- /* The sequential expression has the right form to pass
- to move_op except when renaming happened. Put the
- correct register in EXPR then. */
- if (EXPR_SEPARABLE_P (expr) && REG_P (EXPR_LHS (expr))
- && expr_dest_regno (expr) != expr_dest_regno (expr_vliw))
- {
- replace_dest_with_reg_in_expr (expr, EXPR_LHS (expr_vliw));
- stat_renamed_scheduled++;
- }
+/* Prepare a place to insert the chosen expression on BND. */
+static insn_t
+prepare_place_to_insert (bnd_t bnd)
+{
+ insn_t place_to_insert, prev_insn;
+ basic_block bb, prev_bb;
- av_set_add (&expr_seq, expr);
- if (EXPR_WAS_SUBSTITUTED (expr))
- stat_substitutions_total++;
- }
- }
+ /* Init place_to_insert before calling move_op, as the latter
+ can possibly remove BND_TO (bnd). */
+ if (/* If this is not the first insn scheduled. */
+ BND_PTR (bnd))
+ {
+ gcc_unreachable ();
- if (sched_verbose >= 2)
- {
- sel_print ("Best expression(s) (sequential form): ");
- dump_av_set (expr_seq);
- sel_print ("\n");
- }
+ /* Add it after last scheduled. */
+ place_to_insert = ILIST_INSN (BND_PTR (bnd));
+ }
+ else
+ /* Add it before BND_TO. The difference is in the
+ basic block, where INSN will be added. */
+ place_to_insert = PREV_INSN (BND_TO (bnd));
+
+ prev_insn = PREV_INSN (place_to_insert);
+ bb = BLOCK_FOR_INSN (place_to_insert);
+ prev_bb = bb->prev_bb;
+
+ if (!NOTE_INSN_BASIC_BLOCK_P (place_to_insert)
+ || prev_insn == NULL_RTX
+ /* Or it is a label, a barrier or something strange
+ alike. */
+ || !INSN_P (prev_insn)
+ || BLOCK_FOR_INSN (prev_insn) != prev_bb
+ || !in_current_region_p (prev_bb)
+ || control_flow_insn_p (prev_insn))
+ {
+ /* Generate a nop that will help us to avoid removing
+ data sets we need. */
+ place_to_insert = NEXT_INSN (place_to_insert);
+ gcc_assert (BLOCK_FOR_INSN (place_to_insert) == bb);
+ place_to_insert = get_nop_from_pool (place_to_insert);
- /* Init place_to_insert before calling move_op, as the later
- can possibly remove BND_TO (bnd). */
- if (/* If this is not the first insn scheduled. */
- BND_PTR (bnd))
- {
- gcc_unreachable ();
+ prev_bb = bb;
- /* Add it after last scheduled. */
- place_to_insert = ILIST_INSN (BND_PTR (bnd));
- }
- else
- /* Add it before BND_TO. The difference is in the
- basic block, where INSN will be added. */
- place_to_insert = PREV_INSN (BND_TO (bnd));
-
- /* In case of scheduling a jump skipping some other instructions,
- prepare CFG. After this, jump is at the boundary and can be
- scheduled as usual insn by MOVE_OP. */
- if (vinsn_cond_branch_p (EXPR_VINSN (av_set_element (expr_seq, 0))))
- {
- insn = EXPR_INSN_RTX (av_set_element (expr_seq, 0));
+ /* Split block to generate a new floating bb header. */
+ bb = sched_split_block (bb, place_to_insert);
+ copy_data_sets (bb, prev_bb);
+ }
+ else
+ {
+ if (NOTE_INSN_BASIC_BLOCK_P (place_to_insert))
+ {
+ place_to_insert = NEXT_INSN (place_to_insert);
+ gcc_assert (BLOCK_FOR_INSN (place_to_insert) == bb);
+ }
- /* Speculative jumps are not handled. */
- if (insn != BND_TO (bnd)
- && !sel_insn_is_speculation_check (insn))
- move_cond_jump (insn, bnd);
- }
+ /* Generate a nop that will help us to avoid removing
+ data sets we need. */
+ place_to_insert = get_nop_from_pool (place_to_insert);
+ move_nop_to_previous_block (place_to_insert, prev_bb);
+ }
- /* Actually move chosen insn. */
- {
- expr_def _c_expr, *c_expr = &_c_expr;
- bool b;
-
- /* Find a place for C_EXPR to schedule.
- We want to have an invariant that only insns that are
- sel_bb_header_p () have a valid LV_SET. But, in the same time,
- we don't want overhead from recomputation of compute_live ()
- for the half of a block after each movement. Resolution of
- this is floating bb header that will advance along with the
- fence.
-
- Please note that the invariant is an implication: e.g. there
- can be sel_bb_header_p () insns that don't have a valid LV_SET.
- To make an equivalence out of implication we need to invoke
- compute_live () after scheduling of an insn that become
- sel_bb_header_p () - the overhead will be insignificant because
- this case is only possible when we start scheduling of a new
- basic block. Also I've just thought about another concerning
- issue:
- suppose we have a function from a single insn. So far we've
- stripped that insn from the stream in move_op () - and, hence,
- deleted the only valid LV_SET - how are we supposed to get a
- valid LV_SET for the inserted insn out of nowhere? */
+ gcc_assert (single_succ (prev_bb) == bb);
+ return place_to_insert;
+}
- {
- insn_t prev_insn = PREV_INSN (place_to_insert);
- basic_block bb = BLOCK_FOR_INSN (place_to_insert);
- basic_block prev_bb = bb->prev_bb;
-
- if (!NOTE_INSN_BASIC_BLOCK_P (place_to_insert)
- || prev_insn == NULL_RTX
- /* Or it is a label, a barrier or something strange
- alike. */
- || !INSN_P (prev_insn)
- || BLOCK_FOR_INSN (prev_insn) != prev_bb
- || !in_current_region_p (prev_bb)
- || control_flow_insn_p (prev_insn))
- {
- /* Generate a nop that will help us to avoid removing
- data sets we need. */
- place_to_insert = NEXT_INSN (place_to_insert);
- gcc_assert (BLOCK_FOR_INSN (place_to_insert) == bb);
- place_to_insert = get_nop_from_pool (place_to_insert);
+/* Find the original instructions for EXPR_SEQ, move them to the BND
+ boundary, and return the expression to emit in C_EXPR. */
+static void
+move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
+ av_set_t expr_seq, expr_t c_expr)
+{
+ bool b;
+ unsigned book_uid;
+ bitmap_iterator bi;
+ int n_bookkeeping_copies_before_moveop;
- prev_bb = bb;
+#ifdef ENABLE_CHECKING
+ sel_dump_cfg ("before-move_op");
- /* Split block to generate a new floating bb header. */
- bb = sched_split_block (bb, place_to_insert);
- copy_data_sets (bb, prev_bb);
- }
- else
- {
- if (NOTE_INSN_BASIC_BLOCK_P (place_to_insert))
- {
- place_to_insert = NEXT_INSN (place_to_insert);
- gcc_assert (BLOCK_FOR_INSN (place_to_insert) == bb);
- }
+ /* Marker is useful to bind .dot dumps and the log. */
+ if (sched_verbose >= 6)
+ print_marker_to_log ();
+#endif
- /* Generate a nop that will help us to avoid removing
- data sets we need. */
- place_to_insert = get_nop_from_pool (place_to_insert);
+ /* Make a move. This call will remove the original operation,
+ insert all necessary bookkeeping instructions and update the
+ data sets. After that, all we have to do is add the operation
+ before BND_TO (BND). */
+ n_bookkeeping_copies_before_moveop = stat_bookkeeping_copies;
+ max_uid_before_move_op = get_max_uid ();
+ bitmap_clear (current_copies);
+ bitmap_clear (current_originators);
- /* Move the nop to the previous block. */
- {
- insn_t prev_insn = sel_bb_end (prev_bb);
- insn_t note = bb_note (bb);
- insn_t nop_insn = sel_bb_head (bb);
- insn_t next_insn = NEXT_INSN (nop_insn);
+ b = move_op (BND_TO (bnd), expr_seq,
+ get_dest_from_orig_ops (expr_seq), c_expr);
- gcc_assert (prev_insn != NULL_RTX
- && nop_insn == place_to_insert
- && PREV_INSN (note) == prev_insn);
+ if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop)
+ stat_insns_needed_bookkeeping++;
+
+ remove_temp_moveop_nops ();
- NEXT_INSN (prev_insn) = nop_insn;
- PREV_INSN (nop_insn) = prev_insn;
+ EXECUTE_IF_SET_IN_BITMAP (current_copies, 0, book_uid, bi)
+ {
+ /* We allocate these bitmaps lazily. */
+ if (! INSN_ORIGINATORS_BY_UID (book_uid))
+ INSN_ORIGINATORS_BY_UID (book_uid) = BITMAP_ALLOC (NULL);
+
+ bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid),
+ current_originators);
+ }
+
+ /* We should be able to find the expression we've chosen for
+ scheduling. */
+ gcc_assert (b);
+
+ /* We want to use a pattern from expr_vliw, because it could've
+ been substituted, and the rest of data from expr_seq. */
+ if (! rtx_equal_p (EXPR_PATTERN (expr_vliw),
+ EXPR_PATTERN (c_expr)))
+ change_vinsn_in_expr (c_expr, EXPR_VINSN (expr_vliw));
+}
- PREV_INSN (note) = nop_insn;
- NEXT_INSN (note) = next_insn;
+/* Advance state on FENCE with INSN. Return true if INSN is
+ an ASM, and we should advance state once more. */
+static bool
+advance_state_on_fence (fence_t fence, insn_t insn)
+{
+ bool asm_p;
- NEXT_INSN (nop_insn) = note;
- PREV_INSN (next_insn) = note;
+ if (recog_memoized (insn) >= 0)
+ {
+ int res;
+ state_t temp_state = alloca (dfa_state_size);
+
+ gcc_assert (!INSN_ASM_P (insn));
+ asm_p = false;
- BB_END (prev_bb) = nop_insn;
- BLOCK_FOR_INSN (nop_insn) = prev_bb;
- }
- }
+ memcpy (temp_state, FENCE_STATE (fence), dfa_state_size);
+ res = state_transition (FENCE_STATE (fence), insn);
+ gcc_assert (res < 0);
- gcc_assert (single_succ (prev_bb) == bb);
+ if (memcmp (temp_state, FENCE_STATE (fence), dfa_state_size))
+ {
+ FENCE_ISSUED_INSNS (fence)++;
- sel_dump_cfg ("before-move_op");
+ /* We should never issue more than issue_rate insns. */
+ if (FENCE_ISSUED_INSNS (fence) > issue_rate)
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ /* This could be an ASM insn which we'd like to schedule
+ on the next cycle. */
+ asm_p = INSN_ASM_P (insn);
+ if (!FENCE_STARTS_CYCLE_P (fence) && asm_p)
+ advance_one_cycle (fence);
+ }
- /* Marker is useful to bind .dot dumps and the log. */
- if (sched_verbose >= 6)
- print_marker_to_log ();
+ FENCE_STARTS_CYCLE_P (fence) = 0;
+ return asm_p;
+}
- /* Make a move. This call will remove the original operation,
- insert all necessary bookkeeping instructions and update the
- data sets. After that all we have to do is add the operation
- at before BND_TO (BND). */
- n_bookkeeping_copies_before_moveop = stat_bookkeeping_copies;
- max_uid_before_move_op = get_max_uid ();
- bitmap_clear (current_copies);
- bitmap_clear (current_originators);
+/* Update FENCE on which INSN was scheduled and this INSN, too. */
+static void
+update_fence_and_insn (fence_t fence, insn_t insn)
+{
+ bool asm_p;
+
+ /* First, reflect that something is scheduled on this fence. */
+ asm_p = advance_state_on_fence (fence, insn);
+ FENCE_LAST_SCHEDULED_INSN (fence) = insn;
+ VEC_safe_push (rtx, gc, FENCE_EXECUTING_INSNS (fence), insn);
+ if (SCHED_GROUP_P (insn))
+ {
+ FENCE_SCHED_NEXT (fence) = INSN_SCHED_NEXT (insn);
+ SCHED_GROUP_P (insn) = 0;
+ }
+ else
+ FENCE_SCHED_NEXT (fence) = NULL_RTX;
+ if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
+ FENCE_READY_TICKS (fence) [INSN_UID (insn)] = 0;
+
+ /* Set instruction scheduling info. This will be used in bundling,
+ pipelining, tick computations etc. */
+ ++INSN_SCHED_TIMES (insn);
+ EXPR_TARGET_AVAILABLE (INSN_EXPR (insn)) = true;
+ EXPR_ORIG_SCHED_CYCLE (INSN_EXPR (insn)) = FENCE_CYCLE (fence);
+ INSN_AFTER_STALL_P (insn) = FENCE_AFTER_STALL_P (fence);
+ INSN_SCHED_CYCLE (insn) = FENCE_CYCLE (fence);
+
+ /* This does not account for adjust_cost hooks, just add the biggest
+ constant the hook may add to the latency. TODO: make this
+ a target dependent constant. */
+ INSN_READY_CYCLE (insn)
+ = INSN_SCHED_CYCLE (insn) + (INSN_CODE (insn) < 0
+ ? 1
+ : maximal_insn_latency (insn) + 1);
+
+ /* Change these fields last, as they're used above. */
+ FENCE_AFTER_STALL_P (fence) = 0;
+ if (asm_p)
+ advance_one_cycle (fence);
+
+ /* Indicate that we've scheduled something on this fence. */
+ FENCE_SCHEDULED_P (fence) = true;
+ scheduled_something_on_previous_fence = true;
+
+ /* Print debug information when insn's fields are updated. */
+ if (sched_verbose >= 2)
+ {
+ sel_print ("Scheduling insn: ");
+ dump_insn_1 (insn, 1);
+ sel_print ("\n");
+ }
+}
- b = move_op (BND_TO (bnd), expr_seq,
- get_dest_from_orig_ops (expr_seq), c_expr);
- remove_temp_moveop_nops ();
+/* Update boundary BND with INSN and add new boundaries to BNDS_TAILP. */
+static blist_t *
+update_boundaries (bnd_t bnd, insn_t insn, blist_t *bndsp,
+ blist_t *bnds_tailp)
+{
+ succ_iterator si;
+ insn_t succ;
- if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop)
- stat_insns_needed_bookkeeping++;
+ advance_deps_context (BND_DC (bnd), insn);
+ FOR_EACH_SUCC_1 (succ, si, insn,
+ SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
+ {
+ ilist_t ptr = ilist_copy (BND_PTR (bnd));
+
+ ilist_add (&ptr, insn);
+ blist_add (bnds_tailp, succ, ptr, BND_DC (bnd));
+ bnds_tailp = &BLIST_NEXT (*bnds_tailp);
+ }
+
+ blist_remove (bndsp);
+ return bnds_tailp;
+}
- {
- unsigned book_uid;
- bitmap_iterator bi;
-
- EXECUTE_IF_SET_IN_BITMAP (current_copies, 0, book_uid, bi)
- {
- /* We allocate these bitmaps lazily. */
- if (! INSN_ORIGINATORS_BY_UID (book_uid))
- INSN_ORIGINATORS_BY_UID (book_uid) = BITMAP_ALLOC (NULL);
-
- bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid),
- current_originators);
- }
- }
+/* Schedule EXPR_VLIW on BND. Return the insn emitted. */
+static insn_t
+schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
+{
+ av_set_t expr_seq;
+ expr_t c_expr = alloca (sizeof (expr_def));
+ insn_t place_to_insert;
+ insn_t insn;
- /* We should be able to find the expression we've chosen for
- scheduling. */
- gcc_assert (b);
+ expr_seq = find_sequential_best_exprs (bnd, expr_vliw);
- /* We want to use a pattern from expr_vliw, because it could've
- been substituted, and the rest of data from expr_seq. */
- if (! rtx_equal_p (EXPR_PATTERN (expr_vliw),
- EXPR_PATTERN (c_expr)))
- change_vinsn_in_expr (c_expr, EXPR_VINSN (expr_vliw));
- }
+ /* In case of scheduling a jump skipping some other instructions,
+ prepare the CFG. After this, the jump is at the boundary and can be
+ scheduled as a usual insn by MOVE_OP. */
+ if (vinsn_cond_branch_p (EXPR_VINSN (expr_vliw)))
+ {
+ insn = EXPR_INSN_RTX (expr_vliw);
+
+ /* Speculative jumps are not handled. */
+ if (insn != BND_TO (bnd)
+ && !sel_insn_is_speculation_check (insn))
+ move_cond_jump (insn, bnd);
+ }
- /* Add the instruction. */
- insn = emit_insn_from_expr_after (c_expr, NULL, seqno,
- place_to_insert);
- clear_expr (c_expr);
+ /* Find a place for C_EXPR to schedule. */
+ place_to_insert = prepare_place_to_insert (bnd);
+ move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr);
+
+ /* Add the instruction. */
+ insn = emit_insn_from_expr_after (c_expr, NULL, seqno,
+ place_to_insert);
+ clear_expr (c_expr);
+
+ /* Return the nop generated for preserving data sets back
+ into the pool. */
+ if (INSN_NOP_P (place_to_insert))
+ return_nop_to_pool (place_to_insert);
+
+ av_set_clear (&expr_seq);
- ++INSN_SCHED_TIMES (insn);
- EXPR_TARGET_AVAILABLE (INSN_EXPR (insn)) = true;
- EXPR_ORIG_SCHED_CYCLE (INSN_EXPR (insn)) = fence->cycle;
+ /* Check that the recent movement didn't destroy the loop
+ structure. */
+ gcc_assert (!pipelining_p
+ || current_loop_nest == NULL
+ || loop_latch_edge (current_loop_nest));
+ return insn;
+}
- if (INSN_NOP_P (place_to_insert))
- /* Return the nop generated for preserving of data sets back
- into pool. */
- return_nop_to_pool (place_to_insert);
- }
+/* Gather a parallel group of insns at FENCE and assign their seqno
+ to SEQNO. All scheduled insns are gathered in SCHEDULED_INSNS_TAILPP
+ list for later recalculation of seqnos. */
+static void
+fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
+{
+ blist_t bnds = NULL, *bnds_tailp;
+ av_set_t av_vliw = NULL;
+ insn_t insn = FENCE_INSN (fence);
- av_set_clear (&expr_seq);
+ if (sched_verbose >= 2)
+ sel_print ("\nStarting fill_insns for insn %d, cycle %d\n",
+ INSN_UID (insn), FENCE_CYCLE (fence));
- /* Advance the DFA. */
- if (recog_memoized (insn) >= 0)
- {
- int res;
-
- gcc_assert (!INSN_ASM_P (insn));
- asm_p = false;
+ blist_add (&bnds, insn, NULL, FENCE_DC (fence));
+ bnds_tailp = &BLIST_NEXT (bnds);
+ set_target_context (FENCE_TC (fence));
+ target_bb = INSN_BB (insn);
- memcpy (temp_state, FENCE_STATE (fence), dfa_state_size);
+ /* Do while we can add any operation to the current group. */
+ do
+ {
+ blist_t *bnds_tailp1, *bndsp;
+ expr_t expr_vliw;
- res = state_transition (FENCE_STATE (fence), insn);
- gcc_assert (res < 0);
+ compute_av_set_on_boundaries (bnds, &av_vliw);
+ remove_insns_that_need_bookkeeping (fence, &av_vliw);
- if (memcmp (temp_state, FENCE_STATE (fence), dfa_state_size))
- {
- FENCE_ISSUED_INSNS (fence)++;
- /* We should never issue more than issue_rate insns. */
- if (FENCE_ISSUED_INSNS (fence) > issue_rate)
- gcc_unreachable ();
- }
- }
- else
- {
- asm_p = INSN_ASM_P (insn);
-
- /* This could be an ASM insn which we'd like to schedule
- on the next cycle. */
- if (!FENCE_STARTS_CYCLE_P (fence) && asm_p)
- advance_one_cycle (fence);
- }
+#ifdef ENABLE_CHECKING
+ /* If debug parameters tell us to ignore this attempt to move an insn,
+ obey. */
+ remove_insns_for_debug (bnds, &av_vliw);
+#endif
- /* Set instruction scheduling info. This will be used in bundling,
- pipelining, tick computations etc. */
-
- memcpy (temp_state, FENCE_STATE (fence), dfa_state_size);
- INSN_AFTER_STALL_P (insn) = FENCE_AFTER_STALL_P (fence);
- INSN_SCHED_CYCLE (insn) = FENCE_CYCLE (fence);
-
- /* This does not account for adjust_cost hooks, just add the biggest
- constant the hook may add to the latency. TODO: make this
- a target dependent constant. */
- INSN_READY_CYCLE (insn)
- = FENCE_CYCLE (fence) + (INSN_CODE (insn) < 0
- ? 1
- : maximal_insn_latency (insn) + 1);
- EXPR_TARGET_AVAILABLE (INSN_EXPR (insn)) = true;
-
- if (asm_p)
- advance_one_cycle (fence);
-
- FENCE_AFTER_STALL_P (fence) = 0;
- FENCE_STARTS_CYCLE_P (fence) = 0;
- FENCE_LAST_SCHEDULED_INSN (fence) = insn;
- VEC_safe_push (rtx, gc, FENCE_EXECUTING_INSNS (fence), insn);
- if (SCHED_GROUP_P (insn))
- {
- FENCE_SCHED_NEXT (fence) = INSN_SCHED_NEXT (insn);
- SCHED_GROUP_P (insn) = 0;
- }
- else
- FENCE_SCHED_NEXT (fence) = NULL_RTX;
- if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
- FENCE_READY_TICKS (fence) [INSN_UID (insn)] = 0;
+ /* Return early if we have nothing to schedule. */
+ if (av_vliw == NULL)
+ break;
- advance_deps_context (BND_DC (bnd), insn);
+ /* Choose the best expression and, if needed, destination register
+ for it. */
+ expr_vliw = find_best_expr (&av_vliw, bnds, fence);
+ if (!expr_vliw)
+ {
+ av_set_clear (&av_vliw);
+ break;
+ }
- /* Add insn to the list of scheduled on this cycle instructions. */
- ilist_add (*scheduled_insns_tailpp, insn);
- *scheduled_insns_tailpp = &ILIST_NEXT (**scheduled_insns_tailpp);
+ bndsp = &bnds;
+ bnds_tailp1 = bnds_tailp;
- if (sched_verbose >= 2)
- {
- sel_print ("Scheduling insn: ");
- dump_insn_1 (insn, 1);
- sel_print ("\n");
- }
+ do
+ /* !!! This code is guaranteed to execute only once. */
+ {
+ bnd_t bnd = BLIST_BND (*bndsp);
- /* Add new boundaries. */
- FOR_EACH_SUCC_1 (succ, si, insn,
- SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
+ if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr_vliw)))
{
- ilist_t ptr = ilist_copy (BND_PTR (bnd));
-
- ilist_add (&ptr, insn);
- blist_add (bnds_tailp, succ, ptr, FENCE_DC (fence));
- bnds_tailp = &BLIST_NEXT (*bnds_tailp);
+ bndsp = &BLIST_NEXT (*bndsp);
+ continue;
}
+
+ insn = schedule_expr_on_boundary (bnd, expr_vliw, seqno);
+ update_fence_and_insn (fence, insn);
+ bnds_tailp = update_boundaries (bnd, insn, bndsp, bnds_tailp);
- bnds_tail1 = *bnds_tailp1;
- blist_remove (bndsp);
-
- /* Check that the recent movement didn't destroyed loop
- structure. */
- gcc_assert (!pipelining_p
- || current_loop_nest == NULL
- || loop_latch_edge (current_loop_nest));
+ /* Add insn to the list of scheduled on this cycle instructions. */
+ ilist_add (*scheduled_insns_tailpp, insn);
+ *scheduled_insns_tailpp = &ILIST_NEXT (**scheduled_insns_tailpp);
}
- while (*bndsp != bnds_tail1);
+ while (*bndsp != *bnds_tailp1);
av_set_clear (&av_vliw);
- /* Indicate that we've scheduled something on this fence. */
- FENCE_SCHEDULED_P (fence) = true;
- scheduled_something_on_previous_fence = true;
-
- /* When can_issue_more is 0, variable_issue tells us that we should
- advance a cycle. */
- if (can_issue_more == 0)
- {
- need_stall = 1;
- break;
- }
-
/* We currently support information about candidate blocks only for
one 'target_bb' block. Hence we can't schedule after jump insn,
as this will bring two boundaries and, hence, necessity to handle
@@ -4860,7 +4920,7 @@ move_op_merge_succs (insn_t insn, insn_t succ ATTRIBUTE_UNUSED,
moveop_static_params_p sparams = static_params;
/* Nothing to do, if original expr wasn't found below. */
- if (!moveop_drv_call_res)
+ if (moveop_drv_call_res != 1)
return;
/* If this is a first successor. */
@@ -4923,7 +4983,7 @@ fur_merge_succs (insn_t insn ATTRIBUTE_UNUSED, insn_t succ,
on the code motion paths. These branches correspond to value
MOVEOP_DRV_CALL_RES==0 and include SUCCS_BACK and SUCCS_OUT, though
for such branches code_motion_path_driver is not called. */
- if (moveop_drv_call_res)
+ if (moveop_drv_call_res != 0)
return;
/* Mark all registers that do not meet the following condition:
@@ -5288,8 +5348,11 @@ move_op_ascend (insn_t insn, void *static_params)
enum MOVEUP_EXPR_CODE res;
moveop_static_params_p sparams = static_params;
- res = moveup_expr (sparams->c_expr, insn, false, NULL);
- gcc_assert (res != MOVEUP_EXPR_NULL);
+ if (! INSN_NOP_P (insn))
+ {
+ res = moveup_expr (sparams->c_expr, insn, false, NULL);
+ gcc_assert (res != MOVEUP_EXPR_NULL);
+ }
/* Update liveness for this insn as it was invalidated. */
update_liveness_on_insn (insn);
@@ -5349,8 +5412,7 @@ move_op_orig_expr_not_found (insn_t insn, av_set_t orig_ops ATTRIBUTE_UNUSED,
same destination register or memory. */
if (lhs_of_insn_equals_to_dest_p (insn, sparams->dest))
return false;
- else
- return true;
+ return true;
}
/* This function is called while descending current basic block if current
@@ -5437,16 +5499,15 @@ struct code_motion_path_driver_info_def fur_hooks = {
code_motion_path_driver is called recursively. Original operation
was found at least on one path that is starting with one of INSN's
successors (this fact is asserted). */
-static bool
-move_op_process_successors (insn_t insn, av_set_t orig_ops, ilist_t path,
- void *static_params)
+static int
+code_motion_process_successors (insn_t insn, av_set_t orig_ops,
+ ilist_t path, void *static_params)
{
int res = 0;
succ_iterator succ_i;
rtx succ;
struct cmpd_local_params lparams;
-
expr_def _x;
lparams.c_expr_local = &_x;
@@ -5475,8 +5536,9 @@ move_op_process_successors (insn_t insn, av_set_t orig_ops, ilist_t path,
successors. */
code_motion_path_driver_info->merge_succs (insn, succ, b, &lparams,
static_params);
-
- if (b != 0)
+ if (b == 1)
+ res = b;
+ else if (b == -1 && res != 1)
res = b;
}
@@ -5486,7 +5548,10 @@ move_op_process_successors (insn_t insn, av_set_t orig_ops, ilist_t path,
not found below. In most cases, this situation is an error.
The exception is when the original operation is blocked by
bookkeeping generated for another branch. */
- gcc_assert (res || av_set_could_be_blocked_by_bookkeeping_p (orig_ops));
+ gcc_assert (res == 1
+ || (res == 0
+ && av_set_could_be_blocked_by_bookkeeping_p (orig_ops))
+ || res == -1);
/* Merge data, clean up, etc. */
if (code_motion_path_driver_info->after_merge_succs)
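
These two hunks establish the tri-state result convention described in
the ChangeLog: 1 means the original operation was found on some path, -1
means the traversal ran into previously created bookkeeping, and 0 means
it was not found (legitimate only when bookkeeping could be blocking it).
A standalone sketch of the merge (merge_succ_results is a hypothetical
name):

    static int
    merge_succ_results (const int *results, int n)
    {
      int res = 0, i;

      for (i = 0; i < n; i++)
        {
          if (results[i] == 1)
            res = 1;                  /* Found on some path: wins.  */
          else if (results[i] == -1 && res != 1)
            res = -1;                 /* Blocked by bookkeeping.  */
        }
      return res;                     /* 0 only if no path found it.  */
    }
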
@@ -5508,7 +5573,7 @@ code_motion_path_driver_cleanup (av_set_t *orig_ops_p, ilist_t *path_p)
functionality dependent whether code_motion_path_driver_INFO is set to
&MOVE_OP_HOOKS or &FUR_HOOKS. This function implements the common parts
of code (CFG traversal etc) that are shared among both functions. */
-static bool
+static int
code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
cmpd_local_params_p local_params_in,
void *static_params)
@@ -5610,8 +5675,8 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
if (sched_verbose >= 6)
sel_print ("Found original operation at insn %d\n", INSN_UID (insn));
- code_motion_path_driver_info->orig_expr_found (
- insn, expr, local_params_in, static_params);
+ code_motion_path_driver_info->orig_expr_found
+ (insn, expr, local_params_in, static_params);
/* Step back, so on the way back we'll start traversing from the
previous insn (or we'll see that it's bb_note and skip that
@@ -5636,7 +5701,7 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
happen if we've encountered the previously created
bookkeeping. */
code_motion_path_driver_cleanup (&orig_ops, &path);
- return false;
+ return -1;
}
gcc_assert (orig_ops);
@@ -5653,7 +5718,7 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
bb_note, if original insn was a bb_head) or to the bb_end. */
if (!expr)
{
- bool res;
+ int res;
gcc_assert (insn == sel_bb_end (bb));
@@ -5662,16 +5727,16 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
if (insn != bb_head)
ilist_add (&path, insn);
- /* move_op_process_successors should be able to find at least one
+ /* code_motion_process_successors should be able to find at least one
successor for which code_motion_path_driver returns TRUE. */
- res = move_op_process_successors (insn, orig_ops,
- path, static_params);
+ res = code_motion_process_successors (insn, orig_ops,
+ path, static_params);
/* Remove bb tail from path. */
if (insn != bb_head)
ilist_remove (&path);
- if (!res)
+ if (res != 1)
{
/* This is the case when one of the original expr is no longer available
due to bookkeeping created on this branch with the same register.
@@ -5680,7 +5745,7 @@ code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
FALSE when we've encountered a previously generated bookkeeping
insn in moveop_orig_expr_not_found. */
code_motion_path_driver_cleanup (&orig_ops, &path);
- return false;
+ return res;
}
}
@@ -6244,159 +6309,188 @@ sel_region_finish (void)
/* Functions that implement the scheduler driver. */
-/* The main driver for scheduling a region. This function is responsible
- for correct propagation of fences (i.e. scheduling points) and creating
- a group of parallel insns at each of them. It also supports
- pipelining. */
+/* Schedule a parallel instruction group on each of FENCES. */
static void
-sel_sched_region_2 (sel_sched_region_2_data_t data)
+schedule_on_fences (flist_t fences, int max_seqno,
+ ilist_t **scheduled_insns_tailpp)
{
- int orig_max_seqno = data->orig_max_seqno;
- int highest_seqno_in_use = orig_max_seqno;
+ flist_t old_fences = fences;
- stat_bookkeeping_copies = 0;
- stat_insns_needed_bookkeeping = 0;
- stat_renamed_scheduled = 0;
- stat_substitutions_total = 0;
- num_insns_scheduled = 0;
+ if (sched_verbose >= 1)
+ {
+ sel_print ("Scheduling on fences: ");
+ dump_flist (fences);
+ sel_print ("\n");
+ }
- while (fences)
+ scheduled_something_on_previous_fence = false;
+ for (; fences; fences = FLIST_NEXT (fences))
{
- flist_t fences1;
- struct flist_tail_def _new_fences, *new_fences = &_new_fences;
- int min_f, max_f, new_hs;
- ilist_t scheduled_insns = NULL;
- ilist_t *scheduled_insns_tailp = &scheduled_insns;
- ilist_iterator ii;
- insn_t insn;
+ fence_t fence = NULL;
+ int seqno = 0;
+ flist_t fences2;
+ bool first_p = true;
+
+ /* Choose the next fence group to schedule.
+	 The fact that an insn can be scheduled only once
+	 per cycle is guaranteed by two properties:
+ 1. seqnos of parallel groups decrease with each iteration.
+ 2. If is_ineligible_successor () sees the larger seqno, it
+ checks if candidate insn is_in_current_fence_p (). */
+ for (fences2 = old_fences; fences2; fences2 = FLIST_NEXT (fences2))
+ {
+ fence_t f = FLIST_FENCE (fences2);
- scheduled_something_on_previous_fence = false;
- flist_tail_init (new_fences);
+ if (!FENCE_PROCESSED_P (f))
+ {
+ int i = INSN_SEQNO (FENCE_INSN (f));
- if (sched_verbose >= 1)
- {
- sel_print ("Scheduling on fences: ");
- dump_flist (fences);
- sel_print ("\n");
+ if (first_p || i > seqno)
+ {
+ seqno = i;
+ fence = f;
+ first_p = false;
+ }
+ else
+ /* ??? Seqnos of different groups should be different. */
+ gcc_assert (1 || i != seqno);
+ }
}
-
- /* Calculate MIN_F and MAX_F. */
- min_f = max_f = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
- fences1 = fences;
- while ((fences1 = FLIST_NEXT (fences1)))
- {
- int seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences1)));
- if (min_f > seqno)
- min_f = seqno;
- else if (max_f < seqno)
- max_f = seqno;
- }
-
- fences1 = fences;
- do
- {
- fence_t fence = NULL;
- /* SEQNO is set to '0' to avoid 'uninitialized warning'. */
- int seqno = 0;
- flist_t fences2 = fences;
- bool first_p = true;
-
- /* Choose the next fence group to schedule.
- NB: The fact, that insn can be scheduled only once
- on the cycle is guaranteed by two properties:
- 1. seqnos of parallel groups decrease with each iteration.
- 2. If is_ineligible_successor () sees the larger seqno, it
- checks if candidate insn is_in_current_fence_p (). */
- do
- {
- fence_t f = FLIST_FENCE (fences2);
+ gcc_assert (fence);
- if (!FENCE_PROCESSED_P (f))
- {
- int i = INSN_SEQNO (FENCE_INSN (f));
+ /* As FENCE is nonnull, SEQNO is initialized. */
+ seqno -= max_seqno + 1;
+ fill_insns (fence, seqno, scheduled_insns_tailpp);
+ FENCE_PROCESSED_P (fence) = true;
+ }
- if (first_p || i > seqno)
- {
- seqno = i;
- fence = f;
- first_p = false;
- }
- else
- /* ??? Seqnos of different groups should be different. */
- gcc_assert (1 || i != seqno);
- }
- }
- while ((fences2 = FLIST_NEXT (fences2)));
+ /* All av_sets are invalidated by GLOBAL_LEVEL increase, thus we
+ don't need to keep bookkeeping-invalidated exprs any more. */
+ vec_bk_blocked_exprs_clear ();
+}
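The selection loop above always schedules the unprocessed fence with
the maximal seqno next, so parallel groups are filled in decreasing
seqno order; together with the is_ineligible_successor () check this
guarantees each insn is scheduled only once per cycle.  A
self-contained sketch of the same selection, equivalently
restructured, where fence_node and its fields are simplified
hypothetical stand-ins for flist_t/fence_t:

    #include <stdbool.h>
    #include <stddef.h>

    /* Simplified stand-in for a fence list node.  */
    struct fence_node
    {
      int seqno;               /* INSN_SEQNO (FENCE_INSN (f)).  */
      bool processed;          /* FENCE_PROCESSED_P (f).  */
      struct fence_node *next;
    };

    /* Return the unprocessed fence with the maximal seqno, or NULL
       when all fences have been processed.  */
    static struct fence_node *
    pick_next_fence (struct fence_node *fences)
    {
      struct fence_node *best = NULL;

      for (; fences; fences = fences->next)
        if (!fences->processed
            && (best == NULL || fences->seqno > best->seqno))
          best = fences;

      return best;
    }

    /* One fence per iteration, in decreasing seqno order, mirroring
       the overall structure of schedule_on_fences.  */
    static void
    process_all_fences (struct fence_node *fences)
    {
      struct fence_node *fence;

      while ((fence = pick_next_fence (fences)) != NULL)
        {
          /* fill_insns (fence, ...) would run here.  */
          fence->processed = true;
        }
    }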
- gcc_assert (fence);
+/* Calculate MIN_SEQNO and MAX_SEQNO over all the fences in FENCES.  */
+static void
+find_min_max_seqno (flist_t fences, int *min_seqno, int *max_seqno)
+{
+ *min_seqno = *max_seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
- /* As FENCE is nonnull, SEQNO is initialized. */
- seqno -= max_f + 1;
- fill_insns (fence, seqno, &scheduled_insns_tailp);
- FENCE_PROCESSED_P (fence) = true;
- }
- while ((fences1 = FLIST_NEXT (fences1)));
+  /* The first fence is already accounted for in the initialization.  */
+ while ((fences = FLIST_NEXT (fences)))
+ {
+ int seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
+
+ if (*min_seqno > seqno)
+ *min_seqno = seqno;
+ else if (*max_seqno < seqno)
+ *max_seqno = seqno;
+ }
+}
- fences1 = fences;
- do
- {
- fence_t fence = FLIST_FENCE (fences1);
- insn_t insn;
+/* Calculate new fences from FENCES and clear the old fence list.  */
+static flist_t
+calculate_new_fences (flist_t fences, sel_sched_region_2_data_t data,
+ int orig_max_seqno)
+{
+ flist_t old_fences = fences;
+ struct flist_tail_def _new_fences, *new_fences = &_new_fences;
- if (/* This fence doesn't have any successors. */
- !FENCE_BNDS (fence))
- {
- if (!FENCE_SCHEDULED_P (fence))
- /* Nothing was scheduled on this fence. */
- {
- int seqno;
+ flist_tail_init (new_fences);
+ for (; fences; fences = FLIST_NEXT (fences))
+ {
+ fence_t fence = FLIST_FENCE (fences);
+ insn_t insn;
+
+ if (!FENCE_BNDS (fence))
+ {
+ /* This fence doesn't have any successors. */
+ if (!FENCE_SCHEDULED_P (fence))
+ {
+ /* Nothing was scheduled on this fence. */
+ int seqno;
+
+ insn = FENCE_INSN (fence);
+ seqno = INSN_SEQNO (insn);
+ gcc_assert (seqno > 0 && seqno <= orig_max_seqno);
+
+ if (sched_verbose >= 1)
+ sel_print ("Fence %d[%d] has not changed\n",
+ INSN_UID (insn),
+ BLOCK_NUM (insn));
+ move_fence_to_fences (fences, new_fences);
+ }
+ }
+ else
+ extract_new_fences_from (fences, new_fences, data);
+ }
- insn = FENCE_INSN (fence);
- seqno = INSN_SEQNO (insn);
- gcc_assert (seqno > 0 && seqno <= orig_max_seqno);
+ flist_clear (&old_fences);
+ return FLIST_TAIL_HEAD (new_fences);
+}
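Each old fence thus meets one of three fates: a fence that still has
boundaries yields new fences, a dead end with nothing scheduled is
kept as is so the region stays covered, and a fully scheduled dead
end is silently dropped.  A compressed, runnable distillation of that
decision, where fence_disposition, has_bnds and scheduled are
hypothetical stand-ins for the FENCE_BNDS and FENCE_SCHEDULED_P
tests:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirror of the per-fence branch in calculate_new_fences.  */
    static const char *
    fence_disposition (bool has_bnds, bool scheduled)
    {
      if (has_bnds)
        return "extract new fences";   /* extract_new_fences_from  */
      if (!scheduled)
        return "keep fence unchanged"; /* move_fence_to_fences     */
      return "drop fence";             /* fully scheduled dead end */
    }

    int
    main (void)
    {
      printf ("%s\n", fence_disposition (false, false));
      printf ("%s\n", fence_disposition (false, true));
      printf ("%s\n", fence_disposition (true, true));
      return 0;
    }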
- if (sched_verbose >= 1)
- sel_print ("Fence %d[%d] has not changed\n",
- INSN_UID (insn),
- BLOCK_NUM (insn));
- move_fence_to_fences (fences1, new_fences);
- }
- }
- else
- extract_new_fences_from (fences1, new_fences, data);
- }
- while ((fences1 = FLIST_NEXT (fences1)));
+/* Update seqnos of insns given in PSCHEDULED_INSNS using MIN_SEQNO,
+   MAX_SEQNO and HIGHEST_SEQNO_IN_USE, bump the global level, and
+   return the new highest seqno in use.  */
+static int
+update_seqnos_and_stage (int min_seqno, int max_seqno,
+ int highest_seqno_in_use,
+ ilist_t *pscheduled_insns)
+{
+ int new_hs;
+ ilist_iterator ii;
+ insn_t insn;
+
+  /* Actually, new_hs is the seqno of the instruction that was
+     scheduled first (i.e. it is the first one in SCHEDULED_INSNS).  */
+ if (*pscheduled_insns)
+ {
+ new_hs = (INSN_SEQNO (ILIST_INSN (*pscheduled_insns))
+ + highest_seqno_in_use + max_seqno - min_seqno + 2);
+ gcc_assert (new_hs > highest_seqno_in_use);
+ }
+ else
+ new_hs = highest_seqno_in_use;
- flist_clear (&fences);
- fences = FLIST_TAIL_HEAD (new_fences);
-
- /* Actually, new_hs is the seqno of the instruction, that was
- scheduled first (i.e. it is the first one in SCHEDULED_INSNS). */
- if (scheduled_insns)
- {
- new_hs = (INSN_SEQNO (ILIST_INSN (scheduled_insns))
- + highest_seqno_in_use + max_f - min_f + 2);
- gcc_assert (new_hs > highest_seqno_in_use);
- }
- else
- new_hs = highest_seqno_in_use;
+ FOR_EACH_INSN (insn, ii, *pscheduled_insns)
+ {
+ gcc_assert (INSN_SEQNO (insn) < 0);
+ INSN_SEQNO (insn) += highest_seqno_in_use + max_seqno - min_seqno + 2;
+ gcc_assert (INSN_SEQNO (insn) <= new_hs);
+ }
- FOR_EACH_INSN (insn, ii, scheduled_insns)
- {
- gcc_assert (INSN_SEQNO (insn) < 0);
- INSN_SEQNO (insn) += highest_seqno_in_use + max_f - min_f + 2;
- gcc_assert (INSN_SEQNO (insn) <= new_hs);
- }
- ilist_clear (&scheduled_insns);
+ ilist_clear (pscheduled_insns);
+ global_level++;
- highest_seqno_in_use = new_hs;
+ return new_hs;
+}
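The seqno arithmetic is easiest to check with numbers.  During
scheduling, fill_insns works with negative seqnos: schedule_on_fences
hands it the fence seqno minus (max_seqno + 1), so the maximal fence
schedules its group at -1 and lower fences at more negative values.
The shift applied here, highest_seqno_in_use + max_seqno - min_seqno
+ 2, is then just large enough to move every scheduled insn above the
previously used range.  A worked sketch with made-up numbers:

    #include <assert.h>
    #include <stdio.h>

    int
    main (void)
    {
      /* Hypothetical values for one scheduling round.  */
      int min_seqno = 3, max_seqno = 10, highest_seqno_in_use = 10;
      int shift = highest_seqno_in_use + max_seqno - min_seqno + 2; /* 19 */

      /* The maximal fence (seqno 10) scheduled its insns at -1; a
         fence with seqno 7 scheduled its insns at -4.  */
      int first_group = 10 - (max_seqno + 1); /* -1 */
      int later_group = 7 - (max_seqno + 1);  /* -4 */

      /* After the shift both land above the old range.  */
      int new_hs = first_group + shift;       /* 18 */
      int later = later_group + shift;        /* 15 */

      assert (new_hs > highest_seqno_in_use);
      assert (later <= new_hs);
      printf ("new highest seqno: %d, later group: %d\n", new_hs, later);
      return 0;
    }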
- global_level++;
+/* The main driver for scheduling a region. This function is responsible
+ for correct propagation of fences (i.e. scheduling points) and creating
+ a group of parallel insns at each of them. It also supports
+ pipelining. */
+static void
+sel_sched_region_2 (sel_sched_region_2_data_t data)
+{
+ int orig_max_seqno = data->orig_max_seqno;
+ int highest_seqno_in_use = orig_max_seqno;
+
+ stat_bookkeeping_copies = 0;
+ stat_insns_needed_bookkeeping = 0;
+ stat_renamed_scheduled = 0;
+ stat_substitutions_total = 0;
+ num_insns_scheduled = 0;
+
+ while (fences)
+ {
+ int min_seqno, max_seqno;
+ ilist_t scheduled_insns = NULL;
+ ilist_t *scheduled_insns_tailp = &scheduled_insns;
- /* All av_sets are invalidated by GLOBAL_LEVEL increase, thus we
- don't need to keep bookkeeping-invalidated exprs any more. */
- vec_bk_blocked_exprs_clear ();
+ find_min_max_seqno (fences, &min_seqno, &max_seqno);
+ schedule_on_fences (fences, max_seqno, &scheduled_insns_tailp);
+ fences = calculate_new_fences (fences, data, orig_max_seqno);
+ highest_seqno_in_use = update_seqnos_and_stage (min_seqno, max_seqno,
+ highest_seqno_in_use,
+ &scheduled_insns);
}
gcc_assert (data->orig_max_seqno == orig_max_seqno);