author     Dmitry Melnik <dm@ispras.ru>          2008-01-11 13:52:05 +0000
committer  Andrey Belevantsev <abel@ispras.ru>   2008-01-11 13:52:05 +0000
commit     20bd32cd2d29c038de70ec2574acfd22bf9441ab (patch)
tree       67c39706509ac2179287dd3a9e09924810fab399
parent     b6c4efb9fe7c66f4d2c9cf2ab3c3cae8761a12bf (diff)
2008-01-11 Dmitry Melnik <dm@ispras.ru>
Fix various scheduler issues that prevent it from working correctly
before reload.

git-svn-id: https://gcc.gnu.org/svn/gcc/branches/sel-sched-branch@131464 138bc75d-0d04-0410-961f-82ee72b054a4
 gcc/ChangeLog.sel-sched | 108
 gcc/common.opt          |   7
 gcc/config/ia64/ia64.c  |   5
 gcc/haifa-sched.c       |  45
 gcc/opts.c              |  11
 gcc/rtl.h               |   3
 gcc/sched-deps.c        |  18
 gcc/sched-int.h         |   2
 gcc/sel-sched-dump.c    | 129
 gcc/sel-sched-dump.h    |   6
 gcc/sel-sched-ir.c      | 168
 gcc/sel-sched-ir.h      |  11
 gcc/sel-sched.c         | 584
 13 files changed, 764 insertions(+), 333 deletions(-)
diff --git a/gcc/ChangeLog.sel-sched b/gcc/ChangeLog.sel-sched
index c5b1eb86a4d..b4303a247f6 100644
--- a/gcc/ChangeLog.sel-sched
+++ b/gcc/ChangeLog.sel-sched
@@ -1,3 +1,111 @@
+2008-01-11 Dmitry Melnik <dm@ispras.ru>
+
+ Fix various scheduler issues that prevent it from working
+ correctly before reload.
+
+ * common.opt (fsel-insn-range): New flag for debugging.
+ * ia64.c (get_mode_no_for_insn): Change the condition that prevents use
+ of special hardware registers so it can now handle pseudos.
+ (get_spec_check_gen_function): Assert so that we won't reference array
+ with a negative index.
+ (ia64_gen_spec_check): Ditto.
+ * haifa-sched.c (after_recovery, adding_bb_to_current_region_p):
+ New variables to handle correct insertion of the recovery code.
+ (init_before_recovery): Add new BEFORE_RECOVERY_PTR parameter.
+ (sched_extend_bb): Add the prototype.
+ (haifa_sched_init): Init new variable.
+ (init_before_recovery): Add new param. Fix the handling of the case
+ when we insert recovery code before the EXIT block that has a
+ predecessor with a fallthrough edge to it.
+ (sched_create_recovery_block): Pass new parameter.
+ (create_check_block_twin): Change parameter.
+ (haifa_init_insn): Do region specific insn initialization only if
+ the insn was added to the current region.
+ * opts.c (common_handle_option): Handle the fsel_insn_range option.
+ * rtl.h (sel_sched_fix_param): Declare.
+ * sched-deps.c (extend_deps_reg_info): Make sure REG_LAST vector is
+ extended correctly before reload.
+ (sched_analyze_reg): Permit move insn with pseudo even if it crosses
+ call.
+ (free_deps): Zero pointers explicitly after free.
+ * sched-int.h (sched_create_recovery_block): Add new parameter.
+ * sel-sched-dump.c (flag_insn_range): Insn range to perform selective
+ scheduling.
+ (block_for_insn, bb_av_set): New debug functions.
+ (in_range_p_1, in_range_p): Parse the expression that determines the
+ insn range for selective scheduling.
+ (sel_sched_fix_param): Handle fsel-insn-range param in scheduler.
+ (av_set_contains_insn_with_uid, av_set_for_bb_n): Debug functions.
+ * sel-sched-dump.h (flag_insn_range, block_for_insn, bb_av_set,
+ in_range_p, av_set_contains_insn_with_uid, av_set_for_bb_n):
+ Declare.
+ * sel-sched-ir.c (init_fences): Free unneeded successors.
+ (sel_hash_rtx): Handle PRE_INC etc. as it's done in original hash_rtx.
+ (sel_gen_insn_from_rtx_after): Add the assert that target reg is
+ available. Postpone the simplification of the insns until the end of
+ the scheduling.
+ (av_set_add): Assert that we never insert NOPs into the av_sets.
+ (merge_with_other_exprs): Reset EXPR_TARGET_AVAILABLE to -1 if two
+ exprs have different target availability values.
+ (av_set_leave_one): Replace with av_set_leave_one_nonspec. This
+ function keeps all speculative insns, leaving only one non-speculative.
+ (init_first_time_insn_data): Do not init dependences on NOPs.
+ (sel_remove_insn): Do not clear data sets on the AFTER_RECOVERY basic
+ block.
+ (finish_insns): Free dependence contexts that may have been left from
+ insns that were removed during the scheduling.
+ (sel_rtl_insn_added): Init new insns only if they're added to the
+ current region.
+ (copy_lv_set_from): New function.
+ (is_ineligible_successor): Move to sel-sched.c.
+ (sel_init_only_bb): Add call to extend_regions.
+ (sel_create_recovery_block): Copy LV_SET to BEFORE_RECOVERY bb.
+ * sel-sched-ir.h (adding_bb_to_current_region_p, after_recovery,
+ av_set_leave_one_nonspec): Declare.
+ (copy_insn_out_of_stream, copy_insn_and_insert_before,
+ is_ineligible_successor): Remove unused declarations.
+ (_succ_iter_cond): Handle AFTER_RECOVERY the same way as EXIT.
+ * sel-sched.c (scheduled_something_on_previous_fence): New variable.
+ (vec_temp_moveop_nops): New vector to store temporary NOPs inserted
+ in move_op to prevent removal of empty BBs.
+ (substitute_reg_in_rhs): Do not allow clobbering the address register
+ of speculative insns.
+ (replace_in_vinsn_using_bitmask_1, replace_in_rtx_using_bitmask,
+ un_substitute): Remove.
+ (verify_target_availability): Assert target unavailability only if
+ nothing was scheduled on the previous fence.
+ (has_spec_dependence_p, un_speculate): Remove.
+ (undo_transformations): Use history to undo transformations.
+ (is_ineligible_successor): Move the function here.
+ (compute_av_set): Free SUCCS that are no longer needed.
+ (find_used_regs_1): Use av_set_leave_one_nonspec.
+ (expr_blocked_by_bookkeeping, vec_bk_blocked_exprs_clear):
+ New functions.
+ (fill_vec_av_set): Recompute target availability or remove the expr if
+ its availability was invalidated by the insertion of bookkeeping
+ earlier.
+ (remove_temp_moveop_nops): New function. Removes the temporary NOPs
+ that might have been created during move_op.
+ (fill_insns): Handle only insns that satisfy the FLAG_INSN_RANGE
+ expression. Call remove_temp_moveop_nops after the move_op.
+ Init EXPR_TARGET_AVAILABLE with TRUE by default.
+ Init scheduled_something_on_previous_fence.
+ (update_and_record_unavailable_insns): New function. Updates data sets
+ for the bookkeeping block and records those expressions that become
+ unavailable after inserting the bookkeeping.
+ (move_op): Use av_set_leave_one_nonspec. Insert NOP in the bb if it's
+ becoming empty so it won't be removed and we'll be able to insert
+ bookkeeping in it when coming later through another branch. Call
+ update_and_record_unavailable_insns on the bookkeeping block to
+ record exprs that can no longer be scheduled above it.
+ (simplify_changed_insns): New function. Handles insn simplification
+ after the scheduling has completed.
+ (sel_region_finish): Use simplify_changed_insns. Free vector
+ vec_bk_blocked_exprs.
+ (sel_sched_region_2): Init scheduled_something_on_previous_fence and
+ vec_bk_blocked_exprs.
+ (sel_global_init): Init after_recovery.
+
2008-01-10 Andrey Belevantsev <abel@ispras.ru>
Properly fixup a merge with trunk rev. 130104.
diff --git a/gcc/common.opt b/gcc/common.opt
index ec2ca9b6809..4af37ec0f47 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -946,6 +946,13 @@ fsel-sched-dump-cfg
Common Report Var(flag_sel_sched_dump_cfg) Init(0)
Dump CFG information during selective scheduling pass.
+fsel-insn-range
+Common
+
+fsel-insn-range=
+Common Joined RejectNegative
+fsel-insn-range=<number> Expression that determines range of insns to handle with sel-sched
+
fsel-sched-single-block-regions
Common Report Var(flag_sel_sched_single_block_regions) Init(0)
Run selective scheduling on single-block regions
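
As an illustrative aside (not part of the patch): combined with selective
scheduling, the new debugging flag would be passed on the command line
roughly as below, assuming the -fselective-scheduling spelling used on
this branch; the range expression must not contain spaces (see in_range_p
in sel-sched-dump.c further down).

    gcc -O2 -fselective-scheduling -fsel-insn-range='30-40&!32-34|33-33' test.c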
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 96e66645b04..653ae067ca7 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -7284,8 +7284,9 @@ get_mode_no_for_insn (rtx insn)
{
if (!reload_completed)
{
+ /* Do not speculate into regs like ar.lc. */
if (!REG_P (reg)
- || !(GR_REGNO_P (REGNO (reg)) || FP_REGNO_P (REGNO (reg))))
+ || (AR_CCV_REGNUM <= REGNO (reg) && REGNO (reg) <= AR_EC_REGNUM))
return -1;
if (!MEM_P (mem))
@@ -7658,6 +7659,7 @@ get_spec_check_gen_function (ds_t ts, int mode_no,
else
gcc_unreachable ();
+ gcc_assert (mode_no >= 0);
return gen_check[mode_no];
}
@@ -7684,6 +7686,7 @@ ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
int mode_no;
mode_no = get_mode_no_for_insn (insn);
+ gcc_assert (mode_no >= 0);
if (label)
op1 = label;
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index 1dcbf2e4f25..265111615c0 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -146,6 +146,7 @@ along with GCC; see the file COPYING3. If not see
#include "params.h"
#include "vecprim.h"
#include "dbgcnt.h"
+#include "cfgloop.h"
#ifdef INSN_SCHEDULING
@@ -227,6 +228,13 @@ static rtx *bb_header = 0;
/* Basic block after which recovery blocks will be created. */
static basic_block before_recovery;
+/* Basic block just before the EXIT_BLOCK and after recovery, if we have
+ created it. */
+basic_block after_recovery;
+
+/* FALSE if we add bb to another region, so we don't need to initialize it. */
+bool adding_bb_to_current_region_p = true;
+
/* Queues, etc. */
/* An instruction is ready to be scheduled when all insns preceding it
@@ -519,7 +527,7 @@ static void generate_recovery_code (rtx);
static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx);
static void add_to_speculative_block (rtx);
-static void init_before_recovery (void);
+static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx, bool);
static void fix_recovery_deps (basic_block);
static void haifa_change_pattern (rtx, rtx);
@@ -532,6 +540,7 @@ static void sched_remove_insn (rtx);
static void clear_priorities (rtx, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx, rtx);
+static void sched_extend_bb (void);
#ifdef ENABLE_CHECKING
static int has_edge_p (VEC(edge,gc) *, int);
static void check_cfg (rtx, rtx);
@@ -2944,6 +2953,7 @@ haifa_sched_init (void)
nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
before_recovery = 0;
+ after_recovery = 0;
}
/* Finish work with the data specific to the Haifa scheduler. */
@@ -3741,7 +3751,7 @@ find_fallthru_edge (basic_block pred)
/* Initialize BEFORE_RECOVERY variable. */
static void
-init_before_recovery (void)
+init_before_recovery (basic_block *before_recovery_ptr)
{
basic_block last;
edge e;
@@ -3760,9 +3770,23 @@ init_before_recovery (void)
basic_block single, empty;
rtx x, label;
+ /* If the fallthrough edge to the exit block we've found comes from the
+ block we've created before, don't do anything more. */
+ if (last == after_recovery)
+ return;
+
+ adding_bb_to_current_region_p = false;
+
single = sched_create_empty_bb (last);
empty = sched_create_empty_bb (single);
+ /* Add new blocks to the root loop. */
+ if (current_loops != NULL)
+ {
+ add_bb_to_loop (single, VEC_index (loop_p, current_loops->larray, 0));
+ add_bb_to_loop (empty, VEC_index (loop_p, current_loops->larray, 0));
+ }
+
single->count = last->count;
empty->count = last->count;
single->frequency = last->frequency;
@@ -3785,8 +3809,14 @@ init_before_recovery (void)
sched_init_only_bb (empty, NULL);
sched_init_only_bb (single, NULL);
+ sched_extend_bb ();
+ adding_bb_to_current_region_p = true;
before_recovery = single;
+ after_recovery = empty;
+
+ if (before_recovery_ptr)
+ *before_recovery_ptr = before_recovery;
if (sched_verbose >= 2 && spec_info->dump)
fprintf (spec_info->dump,
@@ -3799,7 +3829,7 @@ init_before_recovery (void)
/* Returns new recovery block. */
basic_block
-sched_create_recovery_block (void)
+sched_create_recovery_block (basic_block *before_recovery_ptr)
{
rtx label;
rtx barrier;
@@ -3808,7 +3838,7 @@ sched_create_recovery_block (void)
haifa_recovery_bb_recently_added_p = true;
haifa_recovery_bb_ever_added_p = true;
- init_before_recovery ();
+ init_before_recovery (before_recovery_ptr);
barrier = get_last_bb_insn (before_recovery);
gcc_assert (BARRIER_P (barrier));
@@ -3827,8 +3857,6 @@ sched_create_recovery_block (void)
fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
rec->index);
- before_recovery = rec;
-
return rec;
}
@@ -3913,7 +3941,7 @@ create_check_block_twin (rtx insn, bool mutate_p)
/* Create recovery block. */
if (mutate_p || targetm.sched.needs_block_p (todo_spec))
{
- rec = sched_create_recovery_block ();
+ rec = sched_create_recovery_block (NULL);
label = BB_HEAD (rec);
}
else
@@ -5013,7 +5041,8 @@ haifa_init_insn (rtx insn)
sched_deps_init (false);
haifa_init_h_i_d (NULL, NULL, NULL, insn);
- sd_init_insn (insn);
+ if (adding_bb_to_current_region_p)
+ sd_init_insn (insn);
}
void (* sched_init_only_bb) (basic_block, basic_block);
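
A minimal caller sketch (mirroring sel_create_recovery_block in
sel-sched-ir.c below) shows what the new out-parameter is for: a caller
that needs to know which block init_before_recovery created passes a
pointer to fill in, while passing NULL keeps the old behavior, as in
create_check_block_twin above.

    basic_block before_recovery = NULL;
    basic_block rec;

    /* The Haifa scheduler does not care where the recovery code lands.  */
    rec = sched_create_recovery_block (NULL);

    /* The selective scheduler wants the block created by
       init_before_recovery, e.g. to copy liveness information to it.  */
    rec = sched_create_recovery_block (&before_recovery);
    if (before_recovery)
      copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);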
diff --git a/gcc/opts.c b/gcc/opts.c
index 7f3594a3deb..9ac9cf1d217 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -837,7 +837,7 @@ decode_options (unsigned int argc, const char **argv)
flag_schedule_insns = 1;
/* Turn off the sched2 pass in favor to selective scheduling. */
flag_schedule_insns_after_reload = 1;
- flag_selective_scheduling = 0;
+ flag_selective_scheduling = 0;
flag_selective_scheduling2 = 1;
#endif
flag_regmove = 1;
@@ -1675,6 +1675,15 @@ common_handle_option (size_t scode, const char *arg, int value,
set_random_seed (arg);
break;
+ case OPT_fsel_insn_range:
+ if (value)
+ return 0;
+ break;
+
+ case OPT_fsel_insn_range_:
+ sel_sched_fix_param ("insn-range", arg);
+ break;
+
case OPT_fsched_verbose_:
#ifdef INSN_SCHEDULING
fix_sched_param ("verbose", arg);
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 24e7a41c8f5..c9c54869978 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -2129,6 +2129,9 @@ extern void schedule_ebbs (void);
/* In haifa-sched.c. */
extern void fix_sched_param (const char *, const char *);
+/* In sel-sched-dump.c. */
+extern void sel_sched_fix_param (const char *param, const char *val);
+
/* In print-rtl.c */
extern const char *print_rtx_head;
extern void debug_rtx (const_rtx);
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 811f672eaea..18e427fb73f 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -1621,15 +1621,15 @@ extend_deps_reg_info (struct deps *deps, int regno)
int max_regno = regno + 1;
gcc_assert (!reload_completed);
-
+
/* In a readonly context, it would not hurt to extend info,
but it should not be needed. */
- if (deps->readonly)
+ if (reload_completed && deps->readonly)
{
deps->max_reg = max_regno;
return;
}
-
+
if (max_regno > deps->max_reg)
{
deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
@@ -1705,8 +1705,13 @@ sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
}
/* Don't let it cross a call after scheduling if it doesn't
- already cross one. */
- if (REG_N_CALLS_CROSSED (regno) == 0)
+ already cross one.
+ If REGNO >= REG_INFO_P_SIZE then it was introduced in our scheduler,
+ and that could have happened only before reload. Thus, we can consider
+ INSN movable, since reload should take care of all the operations
+ renamed into new pseudos. */
+ if (regno < FIRST_PSEUDO_REGISTER
+ && REG_N_CALLS_CROSSED (regno) == 0)
{
if (!deps->readonly
&& ref == USE)
@@ -2905,6 +2910,9 @@ free_deps (struct deps *deps)
CLEAR_REG_SET (&deps->reg_conditional_sets);
free (deps->reg_last);
+ deps->reg_last = NULL;
+
+ deps = NULL;
}
/* An array indexed by INSN_UID that holds the data related
diff --git a/gcc/sched-int.h b/gcc/sched-int.h
index f4f6769da2b..e3520a552e4 100644
--- a/gcc/sched-int.h
+++ b/gcc/sched-int.h
@@ -208,7 +208,7 @@ extern basic_block sched_split_block_1 (basic_block, rtx);
extern basic_block (* sched_create_empty_bb) (basic_block);
extern basic_block sched_create_empty_bb_1 (basic_block);
-extern basic_block sched_create_recovery_block (void);
+extern basic_block sched_create_recovery_block (basic_block *);
extern void sched_create_recovery_edges (basic_block, basic_block,
basic_block);
diff --git a/gcc/sel-sched-dump.c b/gcc/sel-sched-dump.c
index 1704a879e4d..607f177fd85 100644
--- a/gcc/sel-sched-dump.c
+++ b/gcc/sel-sched-dump.c
@@ -85,6 +85,11 @@ bool sched_dump_to_dot_p = false;
/* Controls how insns from a fence list should be dumped. */
static int dump_flist_insn_flags = (DUMP_INSN_UID | DUMP_INSN_BBN
| DUMP_INSN_SEQNO);
+
+/* Holds an expression that determines whether an insn will be scheduled
+ using all features of selective scheduling or the corresponding
+ code motion will be skipped. */
+const char *flag_insn_range = NULL;
/* Core functions for pretty printing. */
@@ -1125,6 +1130,19 @@ insn_uid (rtx insn)
return INSN_UID (insn);
}
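+/* Return the basic block of INSN (debug helper).  */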
+basic_block
+block_for_insn (rtx insn)
+{
+ return BLOCK_FOR_INSN (insn);
+}
+
+
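+/* Return the av_set of basic block BB (debug helper).  */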
+av_set_t
+bb_av_set (basic_block bb)
+{
+ return BB_AV_SET (bb);
+}
+
rtx insn_pattern (rtx insn)
{
return PATTERN (insn);
@@ -1368,3 +1386,114 @@ debug_find_unreachable_blocks (void)
return !(pre_order_num == n_basic_blocks - NUM_FIXED_BLOCKS);
}
+
+/* Helper function for in_range_p. */
+static int in_range_p_1 (int val, const char *expr, int i1, int i2, bool *err)
+{
+ int br = 0;
+ int i = -1, x;
+ char ops[] = "|&-";
+ char *p;
+ char c = 0;
+
+ if (i1 > i2)
+ *err = true;
+
+ if (*err)
+ return 0;
+
+ for (p = ops; *p; p++)
+ {
+ for (i = i2; i >= i1; i--)
+ {
+ c = expr[i];
+ if (c == ')')
+ br++;
+ else if (c == '(')
+ br--;
+
+ if (!br && c == *p)
+ goto l;
+ }
+ }
+
+ l: if (br)
+ {
+ *err = 1;
+ return 0;
+ }
+
+ if (*p) {
+ if (c == '&')
+ return in_range_p_1 (val, expr, i1, i-1, err)
+ && in_range_p_1 (val, expr, i+1, i2, err);
+
+ if (c == '|')
+ return in_range_p_1 (val, expr, i1, i-1, err)
+ || in_range_p_1 (val, expr, i+1, i2, err);
+ }
+
+ if (expr[i1] == '(' && expr[i2] == ')')
+ return in_range_p_1 (val, expr, i1+1, i2-1, err);
+
+ if (expr[i1] == '!')
+ return !in_range_p_1 (val, expr, i1+1, i2, err);
+
+ if (*p && c == '-')
+ return (in_range_p_1 (val, expr, i1, i-1, err) <= val)
+ && val <= in_range_p_1 (val, expr, i+1, i2, err);
+
+ sscanf (expr+i1, "%d%n", &x, &i);
+ if (i1 + i != i2 + 1)
+ {
+ *err = true;
+ return false;
+ }
+ else
+ return x;
+}
+
+
+/* Returns whether VAL is within the range given by the EXPR.
+ E.g. "30-40&!32-34|33-33" will return true only for the following values:
+ 30 31 33 35 36 37 38 39 40. The expression may consist only of numbers,
+ the operators "-", "&", "|", "!" and parentheses. A range containing a
+ single integer N should be written as "N-N", and the expression must not
+ contain any spaces. If the expression is not valid, ERR is set to TRUE. */
+
+bool in_range_p (int val, const char *expr, bool *err)
+{
+ return in_range_p_1 (val, expr, 0, strlen (expr) - 1, err);
+}
+
+/* sel_sched_fix_param () is called from opts.c upon detection
+ of the -fsel-insn-range=EXPR option. */
+void
+sel_sched_fix_param (const char *param, const char *val)
+{
+ if (!strcmp (param, "insn-range"))
+ flag_insn_range = val;
+ else
+ warning (0, "sel_sched_fix_param: unknown param: %s", param);
+}
+
+/* Returns whether AV contains an rhs whose insn uid equals INSN_UID. */
+bool
+av_set_contains_insn_with_uid (av_set_t av, int insn_uid)
+{
+ av_set_iterator i;
+ rhs_t rhs;
+
+ FOR_EACH_RHS (rhs, i, av)
+ if (INSN_UID (EXPR_INSN_RTX (rhs)) == insn_uid)
+ return true;
+
+ return false;
+}
+
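+/* Return the av_set of the basic block with index N (debug helper).  */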
+av_set_t
+av_set_for_bb_n (int n)
+{
+ return BB_AV_SET (BASIC_BLOCK (n));
+}
+
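
A short usage sketch (illustrative only, following the grammar documented
above): with the example expression from the comment, UID 33 is accepted
while UID 32 is rejected.

    bool err = false;

    /* True: 33 matches the "|33-33" alternative.  */
    gcc_assert (in_range_p (33, "30-40&!32-34|33-33", &err) && !err);

    /* False: 32 is excluded by "!32-34".  */
    gcc_assert (!in_range_p (32, "30-40&!32-34|33-33", &err) && !err);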
diff --git a/gcc/sel-sched-dump.h b/gcc/sel-sched-dump.h
index afca4022e2c..036ace1be73 100644
--- a/gcc/sel-sched-dump.h
+++ b/gcc/sel-sched-dump.h
@@ -133,6 +133,7 @@ enum _dump_insn
| DUMP_INSN_PATTERN)
};
+extern const char *flag_insn_range;
extern void dump_insn_1 (insn_t, int);
extern void dump_insn (insn_t);
extern void debug_insn (insn_t);
@@ -214,6 +215,8 @@ extern void setup_dump_cfg_params (bool);
/* Debug functions. */
extern int insn_uid (rtx);
+extern basic_block block_for_insn (rtx insn);
+extern av_set_t bb_av_set (basic_block bb);
extern rtx insn_pattern (rtx);
extern int insn_code (rtx);
extern bool insn_is_set_p (rtx);
@@ -231,5 +234,8 @@ extern bool debug_find_insn_loop (void);
extern bool debug_find_unreachable_blocks (void);
extern void sel_debug_cfg (void);
extern void mem_test (int);
+extern bool in_range_p (int, const char *, bool *);
+extern bool av_set_contains_insn_with_uid (av_set_t av, int insn_uid);
+extern av_set_t av_set_for_bb_n (int n);
#endif
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index a69a2b834ee..1376c3e8b82 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -596,6 +596,7 @@ init_fences (insn_t old_fence)
1 /* cycle */, 0 /* cycle_issued_insns */,
1 /* starts_cycle_p */, 0 /* after_stall_p */);
}
+ free (succs);
}
/* Merges two fences (filling fields of OLD_FENCE with resulting values) by
@@ -1209,6 +1210,18 @@ sel_hash_rtx (rtx x, enum machine_mode mode)
}
break;
+ case PRE_DEC:
+ case PRE_INC:
+ case POST_DEC:
+ case POST_INC:
+ case PRE_MODIFY:
+ case POST_MODIFY:
+ case PC:
+ case CC0:
+ case CALL:
+ case UNSPEC_VOLATILE:
+ return hash;
+
case UNSPEC:
/* Skip UNSPECs when we are so told. */
if (targetm.sched.skip_rtx_p && targetm.sched.skip_rtx_p (x))
@@ -1493,6 +1506,8 @@ sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after)
{
insn_t new_insn;
+ gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true);
+
insn_init.what = INSN_INIT_WHAT_INSN;
new_insn = emit_insn_after (pattern, after);
@@ -1534,12 +1549,10 @@ sel_gen_insn_from_expr_after (rhs_t expr, int seqno, insn_t after)
gcc_assert (!INSN_IN_STREAM_P (insn));
insn_init.what = INSN_INIT_WHAT_INSN;
- add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
-
- /* Do not simplify insn if it is not a final schedule. */
- if (!pipelining_p)
- validate_simplify_insn (insn);
+ add_insn_after (insn, after, BLOCK_FOR_INSN (insn));
+ /* We simplify insns later, after scheduling the region, in
+ simplify_changed_insns. */
insn_init.todo = INSN_INIT_TODO_SSID;
set_insn_init (expr, EXPR_VINSN (expr), seqno);
@@ -1751,8 +1764,8 @@ copy_expr_onside (expr_t to, expr_t from)
init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from),
EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0,
EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, NULL,
- EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
- EXPR_WAS_RENAMED (from));
+ EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from),
+ EXPR_WAS_RENAMED (from));
}
/* Merge bits of FROM rhs to TO rhs. When JOIN_POINT_P is true,
@@ -1965,6 +1978,7 @@ mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set)
void
av_set_add (av_set_t *setp, expr_t expr)
{
+ gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr)));
_list_add (setp);
copy_expr (_AV_SET_EXPR (*setp), expr);
}
@@ -2071,17 +2085,11 @@ merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr)
if (expr2 != NULL)
{
- /* Prefer the expression that comes earlier, as it will be the one
- we will find. */
- if (later && EXPR_ORIG_BB_INDEX (expr)
- && EXPR_ORIG_BB_INDEX (expr) == EXPR_ORIG_BB_INDEX (expr2))
- {
- change_vinsn_in_expr (expr2, EXPR_VINSN (expr));
- EXPR_TARGET_AVAILABLE (expr2) = EXPR_TARGET_AVAILABLE (expr);
- }
-
+ /* Reset target availability on merge, since taking it from only one
+ of the exprs would be wrong when they come from different code. */
+ EXPR_TARGET_AVAILABLE (expr2) = -1;
EXPR_USEFULNESS (expr2) = 0;
-
+
merge_expr (expr2, expr, false);
/* Fix usefulness as it should be now REG_BR_PROB_BASE. */
@@ -2216,11 +2224,26 @@ av_set_clear (av_set_t *setp)
gcc_assert (*setp == NULL);
}
-/* Remove all the elements of SETP except for the first one. */
+/* Leave only one non-speculative element in the SETP. */
void
-av_set_leave_one (av_set_t *setp)
+av_set_leave_one_nonspec (av_set_t *setp)
{
- av_set_clear (&_AV_SET_NEXT (*setp));
+ rhs_t rhs;
+ av_set_iterator i;
+ bool has_one_nonspec = false;
+
+ /* Keep all speculative exprs, and leave one non-speculative
+ (the first one). */
+ FOR_EACH_RHS_1 (rhs, i, setp)
+ {
+ if (!EXPR_SPEC_DONE_DS (rhs))
+ {
+ if (has_one_nonspec)
+ av_set_iter_remove (&i);
+ else
+ has_one_nonspec = true;
+ }
+ }
}
/* Return the N'th element of the SET. */
@@ -2529,10 +2552,13 @@ init_first_time_insn_data (insn_t insn)
/* This should not be set if this is the first time we init data for
insn. */
gcc_assert (first_time_insn_init (insn));
-
- INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
- INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
- init_deps (&INSN_DEPS_CONTEXT (insn));
+
+ if (!INSN_NOP_P (insn))
+ {
+ INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL);
+ INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL);
+ init_deps (&INSN_DEPS_CONTEXT (insn));
+ }
}
/* Free the same data as above for INSN. */
@@ -3273,11 +3299,18 @@ sel_remove_insn (insn_t insn)
clear_expr (INSN_EXPR (insn));
+ /* Empty bbs are not allowed to have LV_SETs. Free them in any case. */
if (sel_bb_empty_p (bb))
+ free_data_sets (bb);
+
+ /* Keep an empty bb only if it immediately precedes EXIT and has an
+ incoming non-fallthrough edge. Otherwise remove it. */
+ if (sel_bb_empty_p (bb)
+ && !(single_succ_p (bb) && single_succ (bb) == EXIT_BLOCK_PTR
+ && (!single_pred_p (bb)
+ || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU))))
/* Get rid of empty BB. */
{
- free_data_sets (bb);
-
if (single_succ_p (bb))
{
basic_block succ_bb;
@@ -3472,6 +3505,21 @@ extend_insn (void)
static void
finish_insns (void)
{
+ unsigned i;
+
+ /* Clear here all dependence contexts that may have been left from insns
+ that were removed during the scheduling. */
+ for (i = 0; i < VEC_length (sel_insn_data_def, s_i_d); i++)
+ {
+ sel_insn_data_def *sid_entry = VEC_index (sel_insn_data_def, s_i_d, i);
+ if (sid_entry->analyzed_deps)
+ {
+ BITMAP_FREE (sid_entry->analyzed_deps);
+ BITMAP_FREE (sid_entry->found_deps);
+ free_deps (&sid_entry->deps_context);
+ }
+ }
+
VEC_free (sel_insn_data_def, heap, s_i_d);
deps_finish_d_i_d ();
}
@@ -3499,8 +3547,10 @@ sel_rtl_insn_added (insn_t insn)
/* Initialize a bit later because something (e.g. CFG) is not
consistent yet. These insns will be initialized when
- sel_init_new_insns () is called. */
- VEC_safe_push (rtx, heap, new_insns, insn);
+ sel_init_new_insns () is called. Insns that are added into
+ another region will be initialized within their own regions. */
+ if (adding_bb_to_current_region_p)
+ VEC_safe_push (rtx, heap, new_insns, insn);
}
/* Save original RTL hooks here. */
@@ -3712,6 +3762,15 @@ init_lv_set (basic_block bb)
BB_LV_SET_VALID_P (bb) = true;
}
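+/* Copy the LV_SET of FROM_BB to BB.  */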
+static void
+copy_lv_set_from (basic_block bb, basic_block from_bb)
+{
+ gcc_assert (!BB_LV_SET_VALID_P (bb));
+
+ COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb));
+ BB_LV_SET_VALID_P (bb) = true;
+}
+
/* Initialize lv set of all bb headers. */
void
init_lv_sets (void)
@@ -4376,50 +4435,6 @@ path_contains_switch_of_sched_times_p (insn_t insn, ilist_t p)
return false;
}
-/* Returns true if INSN is not a downward continuation of the given path P in
- the current stage. */
-bool
-is_ineligible_successor (insn_t insn, ilist_t p)
-{
- insn_t prev_insn;
-
- /* Check if insn is not deleted. */
- if (PREV_INSN (insn) && NEXT_INSN (PREV_INSN (insn)) != insn)
- gcc_unreachable ();
- else if (NEXT_INSN (insn) && PREV_INSN (NEXT_INSN (insn)) != insn)
- gcc_unreachable ();
-
- /* If it's the first insn visited, then the successor is ok. */
- if (!p)
- return false;
-
- prev_insn = ILIST_INSN (p);
-
- if (/* a backward edge. */
- INSN_SEQNO (insn) < INSN_SEQNO (prev_insn)
- /* is already visited. */
- || (INSN_SEQNO (insn) == INSN_SEQNO (prev_insn)
- && (ilist_is_in_p (p, insn)
- /* We can reach another fence here and still seqno of insn
- would be equal to seqno of prev_insn. This is possible
- when prev_insn is a previously created bookkeeping copy.
- In that case it'd get a seqno of insn. Thus, check here
- whether insn is in current fence too. */
- || IN_CURRENT_FENCE_P (insn)))
- /* Was already scheduled on this round. */
- || (INSN_SEQNO (insn) > INSN_SEQNO (prev_insn)
- && IN_CURRENT_FENCE_P (insn))
- /* An insn from another fence could also be
- scheduled earlier even if this insn is not in
- a fence list right now. Check INSN_SCHED_CYCLE instead. */
- || (!pipelining_p
- && INSN_SCHED_TIMES (insn) > 0))
-
- return true;
- else
- return false;
-}
-
/* Returns true when BB should be the end of an ebb. Adapted from the
code in sched-ebb.c. */
bool
@@ -5010,6 +5025,7 @@ sel_init_only_bb (basic_block bb, basic_block after)
{
gcc_assert (after == NULL);
+ extend_regions ();
rgn_make_new_region_out_of_new_block (bb);
}
@@ -5147,12 +5163,16 @@ sel_create_recovery_block (insn_t orig_insn)
basic_block first_bb;
basic_block second_bb;
basic_block recovery_block;
+ basic_block before_recovery = NULL;
first_bb = BLOCK_FOR_INSN (orig_insn);
second_bb = sched_split_block (first_bb, orig_insn);
can_add_real_insns_p = false;
- recovery_block = sched_create_recovery_block ();
+ recovery_block = sched_create_recovery_block (&before_recovery);
+ if (before_recovery)
+ copy_lv_set_from (before_recovery, EXIT_BLOCK_PTR);
+
can_add_real_insns_p = true;
gcc_assert (sel_bb_empty_p (recovery_block));
@@ -5987,7 +6007,7 @@ sel_remove_loop_preheader (void)
jump in PREV_BB that leads to the next basic block NEXT_BB.
If it is so - delete this jump and clear data sets of its
basic block if it becomes empty. */
- if (next_bb->prev_bb == prev_bb
+ if (next_bb->prev_bb == prev_bb
&& prev_bb != ENTRY_BLOCK_PTR
&& jump_leads_only_to_bb_p (BB_END (prev_bb), next_bb))
{
diff --git a/gcc/sel-sched-ir.h b/gcc/sel-sched-ir.h
index 6aa6b8bce11..1d40fd6bb3e 100644
--- a/gcc/sel-sched-ir.h
+++ b/gcc/sel-sched-ir.h
@@ -793,6 +793,8 @@ extern rtx exit_insn;
/* When false, only notes may be added. */
extern bool can_add_real_insns_p;
+/* FALSE if we add bb to another region, so we don't need to initialize it. */
+extern bool adding_bb_to_current_region_p;
@@ -921,6 +923,7 @@ extern bool enable_moveup_set_path_p;
extern bool enable_schedule_as_rhs_p;
extern bool pipelining_p;
extern bool bookkeeping_p;
+extern basic_block after_recovery;
extern int max_insns_to_rename;
extern bool preheader_removed;
@@ -1002,7 +1005,7 @@ extern av_set_t av_set_copy (av_set_t);
extern void av_set_union_and_clear (av_set_t *, av_set_t *);
extern void av_set_union_and_live (av_set_t *, av_set_t *, regset, regset);
extern void av_set_clear (av_set_t *);
-extern void av_set_leave_one (av_set_t *);
+extern void av_set_leave_one_nonspec (av_set_t *);
extern rhs_t av_set_element (av_set_t, int);
extern void av_set_substract_cond_branches (av_set_t *);
extern void av_set_split_usefulness (av_set_t *, int, int);
@@ -1031,8 +1034,6 @@ extern void sel_init_new_insns (void);
extern void sel_finish_new_insns (void);
extern bool bookkeeping_can_be_created_if_moved_through_p (insn_t);
-extern insn_t copy_insn_out_of_stream (vinsn_t);
-extern insn_t copy_insn_and_insert_before (insn_t, insn_t);
extern void sel_remove_insn (insn_t);
extern int vinsn_dfa_cost (vinsn_t, fence_t);
extern bool bb_header_p (insn_t);
@@ -1065,8 +1066,6 @@ extern insn_t cfg_succ (insn_t);
extern int overall_prob_of_succs (insn_t);
extern bool sel_num_cfg_preds_gt_1 (insn_t);
-extern bool is_ineligible_successor (insn_t, ilist_t);
-
extern bool bb_ends_ebb_p (basic_block);
extern bool in_same_ebb_p (insn_t, insn_t);
@@ -1398,7 +1397,7 @@ _succ_iter_cond (succ_iterator *ip, rtx *succp, rtx insn,
{
basic_block bb = ip->e2->dest;
- if (bb == EXIT_BLOCK_PTR)
+ if (bb == EXIT_BLOCK_PTR || bb == after_recovery)
*succp = exit_insn;
else
{
diff --git a/gcc/sel-sched.c b/gcc/sel-sched.c
index 2698dd41f7e..b13209ae130 100644
--- a/gcc/sel-sched.c
+++ b/gcc/sel-sched.c
@@ -187,6 +187,14 @@ int global_level;
/* Current fences. */
flist_t fences;
+/* Used in verify_target_availability to assert that the target reg is
+ reported unavailable by both TARGET_UNAVAILABLE and find_used_regs only
+ if we haven't scheduled anything on the previous fence.
+ If scheduled_something_on_previous_fence is true, TARGET_UNAVAILABLE can
+ have a more conservative value than the one returned by find_used_regs,
+ thus we shouldn't assert that these values are equal. */
+static bool scheduled_something_on_previous_fence;
+
/* All newly emitted insns will have their uids greater than this value. */
static int first_emitted_uid;
@@ -212,6 +220,18 @@ DEF_VEC_P(rhs_t);
DEF_VEC_ALLOC_P(rhs_t,heap);
static VEC(rhs_t, heap) *vec_av_set = NULL;
+/* This vector holds the exprs which may still be present in av_sets, but
+ actually can't be moved up due to bookkeeping created during code motion
+ to another fence. See the comment near the call to
+ update_and_record_unavailable_insns for the detailed explanation. */
+static VEC(unsigned, heap) *vec_bk_blocked_exprs = NULL;
+
+/* Vector to store temporary nops inserted in move_op to prevent removal
+ of empty bbs. */
+DEF_VEC_P(insn_t);
+DEF_VEC_ALLOC_P(insn_t,heap);
+static VEC(insn_t, heap) *vec_temp_moveop_nops = NULL;
+
/* This shows how many times the scheduler was run. */
static int sel_sched_region_run = 0;
@@ -386,6 +406,13 @@ substitute_reg_in_rhs (rhs_t rhs, insn_t insn)
change_vinsn_in_expr (rhs, create_vinsn_from_insn_rtx (new_insn));
+ /* Do not allow clobbering the address register of speculative
+ insns. */
+ if ((EXPR_SPEC_DONE_DS (rhs) & SPECULATIVE)
+ && bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (rhs)),
+ expr_dest_regno (rhs)))
+ EXPR_TARGET_AVAILABLE (rhs) = false;
+
return true;
}
else
@@ -398,57 +425,6 @@ substitute_reg_in_rhs (rhs_t rhs, insn_t insn)
return false;
}
-/* Helper function for replace_in_vinsn_using_bitmask. See comment to that
- function for the details. */
-static int
-replace_in_vinsn_using_bitmask_1 (rtx *cur_rtx, void *arg)
-{
- struct rtx_search_arg *p = (struct rtx_search_arg *) arg;
-
- /* If matched... */
- if (exp_equiv_p (*cur_rtx, p->x, 0, true))
- {
- /* Check whether we should replace it according to bitmask. */
- if (p->bitmask & 1)
- {
- /* Replace. */
- (*cur_rtx) = copy_rtx (p->y);
- }
- /* Advance to next bit. */
- p->bitmask = p->bitmask >> 1;
-
- /* Finish search if no more occurences to replace, or continue search
- (not traversing subexprs of *CUR_RTX). */
- if (!p->bitmask)
- return 1;
- else
- return -1;
- }
- return 0;
-}
-
-/* Performs the substitution according to a BITMASK: every "one" with
- index K in bitmap mask means that the K-th occurence of WHAT will be
- replaced by BY_WHAT in WHERE, "0" means that it won't be changed. */
-static void
-replace_in_rtx_using_bitmask (rtx *wherep, rtx what, rtx by_what,
- unsigned long bitmask)
-{
- struct rtx_search_arg arg;
-
- gcc_assert (bitmask);
-
- arg.x = what;
- arg.y = by_what;
- arg.bitmask = bitmask;
-
- for_each_rtx (wherep,
- &replace_in_vinsn_using_bitmask_1, (void *) &arg);
-
- /* Make sure all occurences has been replaced. */
- gcc_assert (!arg.bitmask);
-}
-
/* Helper function for count_occurences_equiv. */
static int
count_occurrences_1 (rtx *cur_rtx, void *arg)
@@ -514,78 +490,6 @@ rtx_search (rtx what, rtx where)
{
return (count_occurrences_equiv (what, where, 1) > 0);
}
-
-/* if INSN is a copy x:=y and if there is an rhs r in AV that uses y,
- it adds all variants of r to AV derived by replacing one or more
- occurrences of y by x. */
-ATTRIBUTE_UNUSED static void
-un_substitute (rhs_t rhs, rtx insn, av_set_t *new_set_ptr)
-{
- rtx pat, src_reg, dst_reg;
- vinsn_t vi;
- rtx where_count;
- unsigned n_occur;
- unsigned long bitmask;
-
- if (!flag_sel_sched_substitution)
- return;
-
- /* Catch X := Y insns, where X and Y are regs. Otherwise return. */
- if (!insn_eligible_for_subst_p (insn))
- return;
-
- pat = PATTERN (insn);
- src_reg = SET_SRC (pat);
- dst_reg = SET_DEST (pat);
- vi = RHS_VINSN (rhs);
-
- where_count = (VINSN_SEPARABLE_P (vi)
- ? VINSN_RHS (vi) : VINSN_PATTERN (vi));
-
- n_occur = count_occurrences_equiv (src_reg, where_count, 0);
-
- /* Try next rhs, if no occurences. */
- if (!n_occur)
- return;
-
- /* Make sure we have enough bits to handle all substitutions. */
- gcc_assert (n_occur < sizeof (unsigned long) * 8);
-
- /* Generate the replacement mask and perform the substitution
- according to it. Every "one" with index K in bitmap mask means
- we change the K-th occurence of SRC_REG with DST_REG, "0" means we
- skip it. We already have the replacement configuration for
- bitmask == 0 in the original set. */
- for (bitmask = 1; bitmask < ((unsigned long)1 << n_occur); bitmask++)
- {
- expr_def _tmp_rhs, *tmp_rhs = &_tmp_rhs;
- rtx new_insn;
- vinsn_t new_vi;
- rtx *wherep;
-
- new_insn = create_copy_of_insn_rtx (VINSN_INSN (vi));
-
- wherep = (VINSN_SEPARABLE_P (vi)
- ? &SET_SRC (PATTERN (new_insn))
- : &PATTERN (new_insn));
-
- replace_in_rtx_using_bitmask (wherep, src_reg, dst_reg, bitmask);
-
- if (!insn_rtx_valid (new_insn))
- continue;
-
- new_vi = create_vinsn_from_insn_rtx (new_insn);
-
- gcc_assert (VINSN_SEPARABLE_P (new_vi) == EXPR_SEPARABLE_P (rhs));
-
- copy_expr (tmp_rhs, rhs);
- change_vinsn_in_expr (tmp_rhs, new_vi);
-
- av_set_add (new_set_ptr, tmp_rhs);
-
- clear_expr (tmp_rhs);
- }
-}
/* Functions to support register renaming. */
@@ -1284,7 +1188,12 @@ verify_target_availability (expr_t expr, regset used_regs,
if (target_available)
gcc_assert (live_available);
else
- gcc_assert (!live_available || !hard_available);
+ /* Check only if we haven't scheduled something on the previous fence,
+ because due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues and having
+ more than one fence, we may end up having targ_un in a block whose
+ successors actually have the target register available. */
+ gcc_assert (scheduled_something_on_previous_fence || !live_available
+ || !hard_available);
}
/* Returns best register for given rhs, or NULL_RTX, if no register can be
@@ -1631,53 +1540,6 @@ speculate_expr (expr_t expr, ds_t ds)
}
}
-/* Return true if there is a speculative dependence between INSN and EXPR. */
-static ds_t
-has_spec_dependence_p (expr_t expr, insn_t insn)
-{
- ds_t *has_dep_p;
- ds_t full_ds;
-
- full_ds = has_dependence_p (expr, insn, &has_dep_p);
-
- if (full_ds == 0)
- return 0;
-
- if (EXPR_SEPARABLE_P (expr)
- && can_overcome_dep_p (has_dep_p [DEPS_IN_RHS]))
- return has_dep_p [DEPS_IN_RHS];
-
- if (can_overcome_dep_p (full_ds))
- return full_ds;
-
- return 0;
-}
-
-/* Record speculations that EXPR should perform in order to be moved through
- INSN. */
-ATTRIBUTE_UNUSED static void
-un_speculate (expr_t expr, insn_t insn)
-{
- ds_t expr_spec_done_ds;
- ds_t full_ds;
-
- if (spec_info == NULL || !sel_speculation_p)
- return;
-
- expr_spec_done_ds = EXPR_SPEC_DONE_DS (expr);
- if (expr_spec_done_ds == 0)
- return;
-
- full_ds = has_spec_dependence_p (expr, insn);
-
- if (full_ds == 0)
- return;
-
- full_ds = ds_get_speculation_types (full_ds);
- EXPR_SPEC_TO_CHECK_DS (expr) |= full_ds;
-}
-
-
/* True when INSN is a "regN = regN" copy. */
static bool
identical_copy_p (rtx insn)
@@ -1732,59 +1594,59 @@ undo_transformations (av_set_t *av_ptr, rtx insn)
int index = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (rhs),
insn, EXPR_VINSN (rhs), true);
- if (index >= 0 && VEC_index (expr_history_def,
- EXPR_HISTORY_OF_CHANGES (rhs),
- index)->type == TRANS_SPECULATION)
+ if (index >= 0)
{
- ds_t old_ds, new_ds;
-
- old_ds = VEC_index (expr_history_def,
- EXPR_HISTORY_OF_CHANGES (rhs),
- index)->spec_ds;
- new_ds = EXPR_SPEC_DONE_DS (rhs);
- gcc_assert (spec_info && sel_speculation_p
- && new_ds
- && (old_ds & SPECULATIVE) != (new_ds & SPECULATIVE));
-
- old_ds &= SPECULATIVE;
- new_ds &= SPECULATIVE;
- new_ds &= ~old_ds;
-
- EXPR_SPEC_TO_CHECK_DS (rhs) |= new_ds;
- }
- }
-
- new_set = NULL;
-
- FOR_EACH_RHS (rhs, av_iter, *av_ptr)
- {
- int index = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (rhs),
- insn, EXPR_VINSN (rhs), true);
+ expr_history_def *phist;
- if (index >= 0 && VEC_index (expr_history_def,
- EXPR_HISTORY_OF_CHANGES (rhs),
- index)->type == TRANS_SUBSTITUTION)
- {
- expr_def _tmp_rhs, *tmp_rhs = &_tmp_rhs;
- vinsn_t new_vi;
-
+ phist = VEC_index (expr_history_def,
+ EXPR_HISTORY_OF_CHANGES (rhs),
+ index);
- new_vi = VEC_index (expr_history_def,
- EXPR_HISTORY_OF_CHANGES (rhs),
- index)->old_expr_vinsn;
+ switch (phist->type)
+ {
+ case TRANS_SPECULATION:
+ {
+ ds_t old_ds, new_ds;
+
+ old_ds = phist->spec_ds;
+ new_ds = EXPR_SPEC_DONE_DS (rhs);
+ gcc_assert (spec_info && sel_speculation_p
+ && new_ds
+ && ((old_ds & SPECULATIVE)
+ != (new_ds & SPECULATIVE)));
+
+ old_ds &= SPECULATIVE;
+ new_ds &= SPECULATIVE;
+ new_ds &= ~old_ds;
+
+ EXPR_SPEC_TO_CHECK_DS (rhs) |= new_ds;
+ break;
+ }
+ case TRANS_SUBSTITUTION:
+ {
+ expr_def _tmp_rhs, *tmp_rhs = &_tmp_rhs;
+ vinsn_t new_vi;
+
+
+ new_vi = phist->old_expr_vinsn;
- gcc_assert (VINSN_SEPARABLE_P (new_vi)
- == EXPR_SEPARABLE_P (rhs));
-
- copy_expr (tmp_rhs, rhs);
- change_vinsn_in_expr (tmp_rhs, new_vi);
-
- av_set_add (&new_set, tmp_rhs);
- clear_expr (tmp_rhs);
+ gcc_assert (VINSN_SEPARABLE_P (new_vi)
+ == EXPR_SEPARABLE_P (rhs));
+
+ copy_expr (tmp_rhs, rhs);
+ change_vinsn_in_expr (tmp_rhs, new_vi);
+
+ av_set_add (&new_set, tmp_rhs);
+ clear_expr (tmp_rhs);
+ break;
+ }
+ default:
+ gcc_unreachable ();
+ }
}
-
+
}
-
+
av_set_union_and_clear (av_ptr, &new_set);
}
@@ -2216,6 +2078,49 @@ equal_after_moveup_path_p (rhs_t cur_rhs, ilist_t path, rhs_t rhs_vliw)
/* Functions that compute av and lv sets. */
+/* Returns true if INSN is not a downward continuation of the given path P in
+ the current stage. */
+static bool
+is_ineligible_successor (insn_t insn, ilist_t p)
+{
+ insn_t prev_insn;
+
+ /* Check if insn is not deleted. */
+ if (PREV_INSN (insn) && NEXT_INSN (PREV_INSN (insn)) != insn)
+ gcc_unreachable ();
+ else if (NEXT_INSN (insn) && PREV_INSN (NEXT_INSN (insn)) != insn)
+ gcc_unreachable ();
+
+ /* If it's the first insn visited, then the successor is ok. */
+ if (!p)
+ return false;
+
+ prev_insn = ILIST_INSN (p);
+
+ if (/* a backward edge. */
+ INSN_SEQNO (insn) < INSN_SEQNO (prev_insn)
+ /* is already visited. */
+ || (INSN_SEQNO (insn) == INSN_SEQNO (prev_insn)
+ && (ilist_is_in_p (p, insn)
+ /* We can reach another fence here and still seqno of insn
+ would be equal to seqno of prev_insn. This is possible
+ when prev_insn is a previously created bookkeeping copy.
+ In that case it'd get a seqno of insn. Thus, check here
+ whether insn is in current fence too. */
+ || IN_CURRENT_FENCE_P (insn)))
+ /* Was already scheduled on this round. */
+ || (INSN_SEQNO (insn) > INSN_SEQNO (prev_insn)
+ && IN_CURRENT_FENCE_P (insn))
+ /* An insn from another fence could also be
+ scheduled earlier even if this insn is not in
+ a fence list right now. Check INSN_SCHED_CYCLE instead. */
+ || (!pipelining_p
+ && INSN_SCHED_TIMES (insn) > 0))
+ return true;
+ else
+ return false;
+}
+
/* Compute av set before INSN.
INSN - the current operation (actual rtx INSN)
P - the current path, which is list of insns visited so far
@@ -2326,6 +2231,7 @@ compute_av_set (insn_t insn, ilist_t p, int ws, bool unique_p)
/* We will edit SUCC_SET and RHS_SPEC field of its elements. */
succ_set = compute_av_set (succs[succ], p, ws + 1, true);
+
av_set_split_usefulness (&succ_set, probs[succ], all_prob);
if (real_succs_n > 1)
@@ -2380,9 +2286,12 @@ compute_av_set (insn_t insn, ilist_t p, int ws, bool unique_p)
/* Finally, check liveness restrictions on paths leaving the region. */
if (other_succs_n > 0)
- for (succ = 0; succ < other_succs_n; succ++)
- mark_unavailable_targets
- (av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (other_succs[succ])));
+ {
+ for (succ = 0; succ < other_succs_n; succ++)
+ mark_unavailable_targets
+ (av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (other_succs[succ])));
+ free (other_succs);
+ }
if (real_succs_n > 1)
{
@@ -2716,9 +2625,15 @@ find_used_regs_1 (insn_t insn, av_set_t orig_ops, ilist_t path,
return 0;
}
- /* !!! When using different types of speculation we must not leave
- just one element in orig_ops. */
- /*av_set_leave_one (&orig_ops);*/
+ /* For non-speculative insns we have to leave only one form of the
+ original operation, because if we don't, we may end up with
+ different C_RHSes and, consequently, with bookkeepings for different
+ expression forms along the same code motion path. That may lead to
+ generation of incorrect code. So for each code motion we stick to
+ the single form of the instruction, except for speculative insns
+ which we need to keep in different forms with all speculation
+ types. */
+ av_set_leave_one_nonspec (&orig_ops);
}
if (CALL_P (insn))
@@ -2798,7 +2713,6 @@ find_used_regs_1 (insn_t insn, av_set_t orig_ops, ilist_t path,
/* Av set ops could have changed when moving through this insn.
To find them below it, we have to un-speculate and un-substitute
them. */
-
undo_transformations (&orig_ops, insn);
/* If all original operands have been filtered on this branch,
@@ -3282,6 +3196,30 @@ process_use_exprs (av_set_t *av_ptr, blist_t bnds)
return NULL;
}
+/* Lookup EXPR in VEC_BK_BLOCKED_EXPRS and return TRUE if found. */
+static bool
+expr_blocked_by_bookkeeping (expr_t expr)
+{
+ unsigned hash;
+ int n;
+
+ for (n = 0; VEC_iterate (unsigned, vec_bk_blocked_exprs, n, hash); n++)
+ if (hash == VINSN_HASH (EXPR_VINSN (expr)))
+ return true;
+
+ return false;
+}
+
+/* Remove all old exprs from VEC_BK_BLOCKED_EXPRS. */
+static void
+vec_bk_blocked_exprs_clear (void)
+{
+ /* Clear the vector. */
+ if (VEC_length (unsigned, vec_bk_blocked_exprs) > 0)
+ VEC_block_remove (unsigned, vec_bk_blocked_exprs, 0,
+ VEC_length (unsigned, vec_bk_blocked_exprs));
+}
+
/* Turn AV into a vector, filter inappropriate insns and sort it. Return
true if there is something to schedule. */
static bool
@@ -3371,6 +3309,22 @@ fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence)
FIXME: try to minimize calls to this. */
target_available = EXPR_TARGET_AVAILABLE (rhs);
+ /* If the availability of the RHS is invalidated by the insertion of
+ bookkeeping earlier, make sure that we won't choose this rhs for
+ scheduling if it's not separable, and if it is separable, then
+ we have to recompute the set of available registers for it. */
+ if (expr_blocked_by_bookkeeping (rhs))
+ {
+ if (!EXPR_SEPARABLE_P (rhs))
+ {
+ VEC_unordered_remove (rhs_t, vec_av_set, n);
+ print ("- rhs is blocked by bookkeeping inserted earlier; ");
+ continue;
+ }
+ else
+ target_available = -1;
+ }
+
if (target_available == true)
{
/* Do nothing -- we can use an existing register. */
@@ -3392,7 +3346,7 @@ fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence)
continue;
}
else
- {
+ {
fail++;
if (/* Either we don't know the answer, or the register is
not available in separable RHS. Do it the hard way. */
@@ -4291,6 +4245,27 @@ move_cond_jump (rtx insn, bnd_t bnd)
line_finish ();
}
+/* Remove nops generated during move_op to prevent removal of empty
+ basic blocks. */
+static void
+remove_temp_moveop_nops (void)
+{
+ int i;
+ insn_t insn;
+
+ for (i = 0; VEC_iterate (insn_t, vec_temp_moveop_nops, i, insn); i++)
+ {
+ gcc_assert (INSN_NOP_P (insn));
+ return_nop_to_pool (insn);
+ }
+
+ /* Empty the vector. */
+ if (VEC_length (insn_t, vec_temp_moveop_nops) > 0)
+ VEC_block_remove (insn_t, vec_temp_moveop_nops, 0,
+ VEC_length (insn_t, vec_temp_moveop_nops));
+
+}
+
/* Records the number of fill_insns runs for debugging purposes. */
static int fill_insns_run = 0;
@@ -4387,6 +4362,17 @@ fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
else
do_p = (start > now) || (now > stop);
+ /* If the more advanced -fsel-insn-range was specified, use only it. */
+ if (flag_insn_range)
+ {
+ bool err = false;
+
+ do_p = in_range_p (now, flag_insn_range, &err);
+ /* An error may be caused by an invalid expression. Note that a
+ valid expression must not contain any spaces. */
+ gcc_assert (!err);
+ }
+
if (!do_p)
/* Leave only the next insn in av_vliw. */
{
@@ -4624,6 +4610,7 @@ fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
b = move_op (BND_TO (bnd), rhs_seq, NULL, NULL, NULL,
get_dest_from_orig_ops (rhs_seq), c_rhs);
+ remove_temp_moveop_nops ();
if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop)
stat_insns_needed_bookkeeping++;
@@ -4705,6 +4692,7 @@ fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
memcpy (temp_state, FENCE_STATE (fence), dfa_state_size);
INSN_AFTER_STALL_P (insn) = FENCE_AFTER_STALL_P (fence);
INSN_SCHED_CYCLE (insn) = FENCE_CYCLE (fence);
+ EXPR_TARGET_AVAILABLE (INSN_EXPR (insn)) = true;
if (asm_p)
advance_one_cycle (fence);
@@ -4764,6 +4752,7 @@ fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
/* Indicate that we've scheduled something on this fence. */
FENCE_SCHEDULED_SOMETHING (fence) = true;
+ scheduled_something_on_previous_fence = true;
/* When can_issue_more is 0, variable_issue tells us that we should
advance a cycle. */
@@ -4852,6 +4841,41 @@ get_dest_from_orig_ops (av_set_t orig_ops)
return dest;
}
+/* Update data sets for the bookkeeping block and record those expressions
+ which become unavailable after inserting this bookkeeping. */
+static void
+update_and_record_unavailable_insns (basic_block book_block)
+{
+ av_set_iterator i;
+ av_set_t old_av_set = NULL;
+ rhs_t cur_rhs;
+
+ /* If there's a valid av_set on BOOK_BLOCK, then there might exist
+ another fence above, where we may choose to schedule an insn which
+ is actually blocked from moving up by the bookkeeping we create here. */
+ if (AV_SET_VALID_P (sel_bb_head (book_block)))
+ {
+ old_av_set = av_set_copy (BB_AV_SET (book_block));
+
+ update_data_sets (sel_bb_head (book_block));
+
+
+ /* Traverse all the expressions in the old av_set and check whether
+ CUR_RHS is in new AV_SET. */
+ FOR_EACH_RHS (cur_rhs, i, old_av_set)
+ if (!av_set_lookup (BB_AV_SET (book_block), EXPR_VINSN (cur_rhs)))
+ {
+ gcc_assert (!VINSN_SEPARABLE_P (EXPR_VINSN (cur_rhs)));
+ VEC_safe_push (unsigned, heap, vec_bk_blocked_exprs,
+ VINSN_HASH (EXPR_VINSN (cur_rhs)));
+ }
+
+ av_set_clear (&old_av_set);
+ }
+ else
+ update_data_sets (sel_bb_head (book_block));
+}
+
/* Move up the operations from ORIG_OPS set traversing
the dag started from INSN. PATH represents the edges traversed so far.
REG is the register chosen for scheduling the current rhs. Insert
@@ -4864,7 +4888,9 @@ move_op (insn_t insn, av_set_t orig_ops, ilist_t path, edge e1, edge e2,
bool c_rhs_inited_p;
bool generated_nop_p = false;
basic_block book_block = NULL;
-
+ insn_t nop = NULL;
+ bool call_update_data_sets_on_nop = false;
+
line_start ();
print ("move_op(");
dump_insn (insn);
@@ -4907,9 +4933,15 @@ move_op (insn_t insn, av_set_t orig_ops, ilist_t path, edge e1, edge e2,
return false;
}
- /* !!! When using different types of speculation we must not leave
- just one element in orig_ops. */
- /*av_set_leave_one (&orig_ops);*/
+ /* For non-speculative insns we have to leave only one form of the
+ original operation, because if we don't, we may end up with
+ different C_RHSes and, consequently, with bookkeepings for different
+ expression forms along the same code motion path. That may lead to
+ generation of incorrect code. So for each code motion we stick to
+ the single form of the instruction, except for speculative insns
+ which we need to keep in different forms with all speculation
+ types. */
+ av_set_leave_one_nonspec (&orig_ops);
}
/* Look at the insn and decide if it could be an ancestor of currently
@@ -5004,7 +5036,16 @@ move_op (insn_t insn, av_set_t orig_ops, ilist_t path, edge e1, edge e2,
{
insn_t x;
-
+ basic_block bb = BLOCK_FOR_INSN (insn);
+ /* If INSN is the only insn in the basic block (not counting the JUMP,
+ which may be a jump to the next insn), leave a NOP there till the
+ return to fill_insns. */
+
+ bool need_nop_to_preserve_bb = ((sel_bb_head (bb) == sel_bb_end (bb))
+ || (NEXT_INSN (sel_bb_head (bb)) == sel_bb_end (bb)
+ && JUMP_P (sel_bb_end (bb)))))
+ && !sel_num_cfg_preds_gt_1 (sel_bb_head (bb));
+
if (!recovery_p)
{
x = get_nop_from_pool (insn);
@@ -5014,6 +5055,28 @@ move_op (insn_t insn, av_set_t orig_ops, ilist_t path, edge e1, edge e2,
else
x = NEXT_INSN (insn);
+ /* If there's only one insn in the BB, make sure that a nop is
+ inserted into it, so the basic block won't disappear when we
+ delete INSN below with sel_remove_insn. It should also survive
+ till the return to fill_insns, so if the nop was created locally
+ in move_op to retain data sets, reset GENERATED_NOP_P so it won't
+ be deleted at the exit of this move_op. */
+ if (need_nop_to_preserve_bb)
+ {
+ if (!generated_nop_p)
+ {
+ nop = get_nop_from_pool (insn);
+ call_update_data_sets_on_nop = true;
+ }
+ else
+ {
+ nop = x;
+ generated_nop_p = false;
+ }
+ gcc_assert (INSN_NOP_P (nop));
+ VEC_safe_push (insn_t, heap, vec_temp_moveop_nops, nop);
+ }
+
/* For the insns that don't have rhs just remove insn from the
stream. Also remove insn if substituting it's right hand
side would result in operation like reg:=reg. This kind of
@@ -5140,7 +5203,9 @@ move_op (insn_t insn, av_set_t orig_ops, ilist_t path, edge e1, edge e2,
gcc_assert (sel_bb_head_p (insn));
}
- if (sel_bb_head_p (insn))
+ if (call_update_data_sets_on_nop)
+ update_data_sets (nop);
+ else if (sel_bb_head_p (insn))
update_data_sets (insn);
else
gcc_assert (AV_LEVEL (insn) == INSN_WS_LEVEL (insn));
@@ -5185,10 +5250,19 @@ move_op (insn_t insn, av_set_t orig_ops, ilist_t path, edge e1, edge e2,
and bookkeeping code was generated at the bookeeping block. This
way insn "r1 := r2" is no longer available as a whole instruction
(but only as rhs) ahead of insn "r1 := r3" in bookkeeping block.
- This situation is handled by calling update_data_sets. */
+ This situation is handled by calling update_data_sets.
+ Since update_data_sets is called only on the bookkeeping block, and
+ it may also have predecessors whose av_sets contain instructions that
+ are no longer available, we save all such expressions that become
+ unavailable during the data sets update on the bookkeeping block in
+ VEC_BK_BLOCKED_EXPRS. Later we avoid selecting such expressions for
+ scheduling. This allows us to avoid recomputing av_sets outside
+ the code motion path. The exprs that are blocked by the bookkeeping
+ are non-separable exprs, since for all other exprs we can just choose
+ another register. */
if (book_block)
- update_data_sets (sel_bb_head (book_block));
+ update_and_record_unavailable_insns (book_block);
/* If INSN was previously marked for deletion, it's time to do it. */
if (generated_nop_p)
@@ -5198,9 +5272,11 @@ move_op (insn_t insn, av_set_t orig_ops, ilist_t path, edge e1, edge e2,
gcc_assert (INSN_NOP_P (insn));
/* Check if there is a unnecessary jump after insn left. */
- if (jump_leads_only_to_bb_p (BB_END (xbb), xbb->next_bb))
+ if (jump_leads_only_to_bb_p (BB_END (xbb), xbb->next_bb)
+ && INSN_SCHED_TIMES (BB_END (xbb)) == 0
+ && !IN_CURRENT_FENCE_P (BB_END (xbb)))
{
- sel_remove_insn (BB_END (xbb));
+ sel_remove_insn (BB_END (xbb));
tidy_fallthru_edge (EDGE_SUCC (xbb, 0));
}
@@ -5471,12 +5547,36 @@ sel_region_init (int rgn)
return false;
}
+/* Simplify insns after the scheduling. */
+static void
+simplify_changed_insns (void)
+{
+ int i;
+
+ for (i = 0; i < current_nr_blocks; i++)
+ {
+ basic_block bb = BASIC_BLOCK (BB_TO_BLOCK (i));
+ rtx insn;
+
+ FOR_BB_INSNS (bb, insn)
+ if (INSN_P (insn))
+ {
+ expr_t expr = INSN_EXPR (insn);
+
+ if (EXPR_WAS_SUBSTITUTED (expr))
+ validate_simplify_insn (insn);
+ }
+ }
+}
+
/* Free the scheduling data for the current region. */
static void
sel_region_finish (void)
{
int i;
+ simplify_changed_insns ();
+
sel_finish_new_insns ();
sched_finish_ready_list ();
@@ -5489,6 +5589,9 @@ sel_region_finish (void)
BITMAP_FREE (current_copies);
BITMAP_FREE (current_originators);
+ if (vec_bk_blocked_exprs)
+ VEC_free (unsigned, heap, vec_bk_blocked_exprs);
+
/* If LV_SET of the region head should be updated, do it now because
there will be no other chance. */
{
@@ -5782,6 +5885,7 @@ sel_sched_region_2 (sel_sched_region_2_data_t data)
ilist_iterator ii;
insn_t insn;
+ scheduled_something_on_previous_fence = false;
flist_tail_init (new_fences);
line_start ();
@@ -5928,6 +6032,10 @@ sel_sched_region_2 (sel_sched_region_2_data_t data)
highest_seqno_in_use = new_hs;
global_level++;
+
+ /* All av_sets are invalidated by GLOBAL_LEVEL increase, thus we
+ don't need to keep bookkeeping-invalidated exprs any more. */
+ vec_bk_blocked_exprs_clear ();
}
gcc_assert (data->orig_max_seqno == orig_max_seqno);
@@ -6112,7 +6220,6 @@ sel_sched_region_1 (void)
}
/* Reschedule pipelined code without pipelining. */
-
for (i = BLOCK_TO_BB (loop_entry->index); i < current_nr_blocks; i++)
{
insn_t insn, next_tail;
@@ -6252,6 +6359,9 @@ sel_global_init (void)
VEC_free (basic_block, heap, bbs);
}
+ /* Reset AFTER_RECOVERY if it has been set by the 1st scheduler pass. */
+ after_recovery = 0;
+
sched_extend_target ();
sched_deps_init (true);