author     Michael Matz <matz@suse.de>  2005-02-17 21:48:41 +0000
committer  Michael Matz <matz@suse.de>  2005-02-17 21:48:41 +0000
commit     4876b4ad5d5baf62c39f86c8b786eb8b78fd3c6f (patch)
tree       f91d5ecfe69ccc9e6562d5d92d21c9154623b50d
parent     d8ca86b5384b1aa0a70a34f41b0c4c57f36dc25c (diff)
2004-11-03  Michael Matz  <matz@suse.de>

	* ra-build.c (select_regclass): Clear usable_regs for call-crossing
	webs.
	(web_class_spill): #if 0 around debug code.
	* ra.c (first_hard_reg): New function.
	(single_reg_in_regclass): New variable.
	(init_ra): Initialize it.
	* ra-colorize.c (ok_class): New function.
	(coalesce, aggressive_coalesce, extended_coalesce_2): Use it.
	* ra.h (single_reg_in_regclass): Declare.
	* pre-reload.c (pre_reload_decompose): Silence uninit warning.

2004-11-03  Michael Matz  <matz@suse.de>

	Merge to HEAD at tree-cleanup-merge-20041024 .

git-svn-id: https://gcc.gnu.org/svn/gcc/branches/new-regalloc-branch@95197 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--   gcc/ChangeLog.RA     19
-rw-r--r--   gcc/df.c           1463
-rw-r--r--   gcc/df.h             96
-rw-r--r--   gcc/pre-reload.c    284
-rw-r--r--   gcc/pre-reload.h      4
-rw-r--r--   gcc/ra-build.c      353
-rw-r--r--   gcc/ra-colorize.c   322
-rw-r--r--   gcc/ra-debug.c      110
-rw-r--r--   gcc/ra-rewrite.c    422
-rw-r--r--   gcc/ra.c            202
-rw-r--r--   gcc/ra.h             23
11 files changed, 1780 insertions, 1518 deletions
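The ChangeLog above names a new first_hard_reg function and a single_reg_in_regclass variable (initialized from init_ra and used by the new ok_class check in ra-colorize.c). Their definitions live in gcc/ra.c, which is only in the diffstat and not in the hunks quoted below, so the following is merely a rough sketch of the likely idea using standard GCC register-class macros; the branch's actual code may differ.

/* Sketch only, not the branch's code: lowest hard register in SET, or -1.  */
static int
first_hard_reg (HARD_REG_SET set)
{
  int i;

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (set, i))
      return i;
  return -1;
}

/* Sketch only: cache the unique member of one-register classes so the
   coalescing checks can test candidates cheaply.  Per the ChangeLog the
   real initialization happens in init_ra; this helper just stands in
   for that loop.  */
int single_reg_in_regclass[N_REG_CLASSES];

static void
init_single_reg_in_regclass (void)
{
  int i;

  for (i = 0; i < N_REG_CLASSES; i++)
    single_reg_in_regclass[i] = (reg_class_size[i] == 1
                                 ? first_hard_reg (reg_class_contents[i])
                                 : -1);
}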
diff --git a/gcc/ChangeLog.RA b/gcc/ChangeLog.RA
index 17b63b835e0..d2aa579e365 100644
--- a/gcc/ChangeLog.RA
+++ b/gcc/ChangeLog.RA
@@ -1,3 +1,20 @@
+2004-11-03 Michael Matz <matz@suse.de>
+
+ * ra-build.c (select_regclass): Clear usable_regs for call-crossing
+ webs.
+ (web_class_spill): #if 0 around debug code.
+ * ra.c (first_hard_reg): New function.
+ (single_reg_in_regclass): New variable.
+ (init_ra): Initialize it.
+ * ra-colorize.c (ok_class): New function.
+ (coalesce, aggressive_coalesce, extended_coalesce_2): Use it.
+ * ra.h (single_reg_in_regclass): Declare.
+ * pre-reload.c (pre_reload_decompose): Silence uninit warning.
+
+2004-11-03 Michael Matz <matz@suse.de>
+
+ Merge to HEAD at tree-cleanup-merge-20041024 .
+
2004-04-02 Michael Matz <matz@suse.de>
* df.c (df_def_record_1, df_uses_record): Set DF_REF_MODE_CHANGE.
@@ -1818,7 +1835,7 @@ Mon Jul 30 20:21:07 2001 Denis Chertykov <denisc@overta.ru>
(allocate_spill_web): Don't abort if already having a stack slot.
(rewrite_program): Deal with webs coalesced to spilled ones.
-2001-06-30 Michael Matz <matzmich@cv.tu-berlin.de>
+2001-06-30 Michael Matz <matzmich@cs.tu-berlin.de>
* ra.c : (colorize_one_web): Two passes over conflicts trying to
spill colored webs. First potential-spill ones, then others.
diff --git a/gcc/df.c b/gcc/df.c
index 43d9ce432fb..7bfad3b99e3 100644
--- a/gcc/df.c
+++ b/gcc/df.c
@@ -1,5 +1,6 @@
/* Dataflow support routines.
- Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
+ Free Software Foundation, Inc.
Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz,
mhayes@redhat.com)
@@ -45,7 +46,7 @@ Here's an example of using the dataflow routines.
df = df_init ();
- df_analyse (df, 0, DF_ALL);
+ df_analyze (df, 0, DF_ALL);
df_dump (df, DF_ALL, stderr);
@@ -54,10 +55,10 @@ Here's an example of using the dataflow routines.
df_init simply creates a poor man's object (df) that needs to be
passed to all the dataflow routines. df_finish destroys this
-object and frees up any allocated memory. DF_ALL says to analyse
+object and frees up any allocated memory. DF_ALL says to analyze
everything.
-df_analyse performs the following:
+df_analyze performs the following:
1. Records defs and uses by scanning the insns in each basic block
or by scanning the insns queued by df_insn_modify.
@@ -82,7 +83,7 @@ deleted or created insn. If the dataflow information requires
updating then all the changed, new, or deleted insns needs to be
marked with df_insn_modify (or df_insns_modify) either directly or
indirectly (say through calling df_insn_delete). df_insn_modify
-marks all the modified insns to get processed the next time df_analyse
+marks all the modified insns to get processed the next time df_analyze
is called.
Beware that tinkering with insns may invalidate the dataflow information.
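Concretely, the cycle described above follows the same pattern as the example near the top of the file (a minimal sketch; df, bb and insn stand for whatever the pass just edited):

  /* ... after each insn that was created, modified or deleted ...  */
  df_insn_modify (df, bb, insn);

  /* ... and once, after the whole batch of changes is finished ...  */
  df_analyze (df, 0, DF_ALL);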
@@ -90,7 +91,7 @@ The philosophy behind these routines is that once the dataflow
information has been gathered, the user should store what they require
before they tinker with any insn. Once a reg is replaced, for example,
then the reg-def/reg-use chains will point to the wrong place. Once a
-whole lot of changes have been made, df_analyse can be called again
+whole lot of changes have been made, df_analyze can be called again
to update the dataflow information. Currently, this is not very smart
with regard to propagating changes to the dataflow so it should not
be called very often.
@@ -127,7 +128,7 @@ When shadowing loop mems we create new uses and defs for new pseudos
so we do not affect the existing dataflow information.
My current strategy is to queue up all modified, created, or deleted
-insns so when df_analyse is called we can easily determine all the new
+insns so when df_analyze is called we can easily determine all the new
or deleted refs. Currently the global dataflow information is
recomputed from scratch but this could be propagated more efficiently.
@@ -150,7 +151,7 @@ Similarly, should the first entry in the use list be the last use
Often the whole CFG does not need to be analyzed, for example,
when optimizing a loop, only certain registers are of interest.
-Perhaps there should be a bitmap argument to df_analyse to specify
+Perhaps there should be a bitmap argument to df_analyze to specify
which registers should be analyzed?
@@ -186,14 +187,17 @@ and again mark them read/write.
#include "sbitmap.h"
#include "bitmap.h"
#include "df.h"
-#include "fibheap.h"
#define FOR_EACH_BB_IN_BITMAP(BITMAP, MIN, BB, CODE) \
do \
{ \
unsigned int node_; \
- EXECUTE_IF_SET_IN_BITMAP (BITMAP, MIN, node_, \
- {(BB) = BASIC_BLOCK (node_); CODE;}); \
+ bitmap_iterator __bi_; \
+ EXECUTE_IF_SET_IN_BITMAP (BITMAP, MIN, node_, __bi_) \
+ { \
+ (BB) = BASIC_BLOCK (node_); \
+ CODE; \
+ } \
} \
while (0)
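The rewritten macro relies on the iterator form of EXECUTE_IF_SET_IN_BITMAP, which takes a bitmap_iterator and a following statement block instead of a trailing CODE argument. Used directly it looks like this (a small sketch, assuming a bitmap of basic-block indices):

  bitmap_iterator bi;
  unsigned ix;
  basic_block bb;

  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, ix, bi)
    {
      bb = BASIC_BLOCK (ix);
      /* ... process BB ...  */
    }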
@@ -203,12 +207,12 @@ static struct df *ddf;
static void df_reg_table_realloc (struct df *, int);
static void df_insn_table_realloc (struct df *, unsigned int);
-static void df_bitmaps_alloc (struct df *, int);
+static void df_bb_table_realloc (struct df *, unsigned int);
+static void df_bitmaps_alloc (struct df *, bitmap, int);
static void df_bitmaps_free (struct df *, int);
static void df_free (struct df *);
static void df_alloc (struct df *, int);
-static rtx df_reg_clobber_gen (unsigned int);
static rtx df_reg_use_gen (unsigned int);
static inline struct df_link *df_link_create (struct ref *, struct df_link *);
@@ -238,14 +242,14 @@ static void df_bb_refs_record (struct df *, basic_block);
static void df_refs_record (struct df *, bitmap);
static void df_bb_reg_def_chain_create (struct df *, basic_block);
-static void df_reg_def_chain_create (struct df *, bitmap);
+static void df_reg_def_chain_create (struct df *, bitmap, bool);
static void df_bb_reg_use_chain_create (struct df *, basic_block);
-static void df_reg_use_chain_create (struct df *, bitmap);
+static void df_reg_use_chain_create (struct df *, bitmap, bool);
static void df_bb_du_chain_create (struct df *, basic_block, bitmap);
static void df_du_chain_create (struct df *, bitmap);
static void df_bb_ud_chain_create (struct df *, basic_block);
static void df_ud_chain_create (struct df *, bitmap);
-static void df_bb_rd_local_compute (struct df *, basic_block);
+static void df_bb_rd_local_compute (struct df *, basic_block, bitmap);
static void df_rd_local_compute (struct df *, bitmap);
static void df_bb_ru_local_compute (struct df *, basic_block);
static void df_ru_local_compute (struct df *, bitmap);
@@ -261,8 +265,8 @@ static int df_modified_p (struct df *, bitmap);
static int df_refs_queue (struct df *);
static int df_refs_process (struct df *);
static int df_bb_refs_update (struct df *, basic_block);
-static int df_refs_update (struct df *);
-static void df_analyse_1 (struct df *, bitmap, int, int);
+static int df_refs_update (struct df *, bitmap);
+static void df_analyze_1 (struct df *, bitmap, int, int);
static void df_insns_modify (struct df *, basic_block, rtx, rtx);
static int df_rtx_mem_replace (rtx *, void *);
@@ -271,10 +275,6 @@ void df_refs_reg_replace (struct df *, bitmap, struct df_link *, rtx, rtx);
static int df_def_dominates_all_uses_p (struct df *, struct ref *def);
static int df_def_dominates_uses_p (struct df *, struct ref *def, bitmap);
-static struct ref *df_bb_regno_last_use_find (struct df *, basic_block,
- unsigned int);
-static struct ref *df_bb_regno_first_def_find (struct df *, basic_block,
- unsigned int);
static struct ref *df_bb_insn_regno_last_use_find (struct df *, basic_block,
rtx, unsigned int);
static struct ref *df_bb_insn_regno_first_def_find (struct df *, basic_block,
@@ -284,22 +284,14 @@ static void df_chain_dump (struct df_link *, FILE *file);
static void df_chain_dump_regno (struct df_link *, FILE *file);
static void df_regno_debug (struct df *, unsigned int, FILE *);
static void df_ref_debug (struct df *, struct ref *, FILE *);
-static void df_rd_transfer_function (int, int *, bitmap, bitmap, bitmap,
- bitmap, void *);
-static void df_ru_transfer_function (int, int *, bitmap, bitmap, bitmap,
- bitmap, void *);
-static void df_lr_transfer_function (int, int *, bitmap, bitmap, bitmap,
- bitmap, void *);
-static void hybrid_search_bitmap (basic_block, bitmap *, bitmap *,
- bitmap *, bitmap *, enum df_flow_dir,
- enum df_confluence_op,
- transfer_function_bitmap,
- sbitmap, sbitmap, void *);
-static void hybrid_search_sbitmap (basic_block, sbitmap *, sbitmap *,
- sbitmap *, sbitmap *, enum df_flow_dir,
- enum df_confluence_op,
- transfer_function_sbitmap,
- sbitmap, sbitmap, void *);
+static void df_rd_transfer_function (int, int *, void *, void *, void *,
+ void *, void *);
+static void df_ru_transfer_function (int, int *, void *, void *, void *,
+ void *, void *);
+static void df_lr_transfer_function (int, int *, void *, void *, void *,
+ void *, void *);
+static void hybrid_search (basic_block, struct dataflow *,
+ sbitmap, sbitmap, sbitmap);
/* Local memory allocation/deallocation routines. */
@@ -332,6 +324,26 @@ df_insn_table_realloc (struct df *df, unsigned int size)
}
}
+/* Increase the bb info table to have space for at least SIZE + 1
+ elements. */
+
+static void
+df_bb_table_realloc (struct df *df, unsigned int size)
+{
+ size++;
+ if (size <= df->n_bbs)
+ return;
+
+ /* Make the table a little larger than requested, so we do not need
+ to enlarge it so often. */
+ size += df->n_bbs / 4;
+
+ df->bbs = xrealloc (df->bbs, size * sizeof (struct bb_info));
+
+ memset (df->bbs + df->n_bbs, 0, (size - df->n_bbs) * sizeof (struct bb_info));
+
+ df->n_bbs = size;
+}
/* Increase the reg info table by SIZE more elements. */
static void
@@ -346,6 +358,8 @@ df_reg_table_realloc (struct df *df, int size)
size = max_reg_num ();
df->regs = xrealloc (df->regs, size * sizeof (struct reg_info));
+ df->reg_def_last = xrealloc (df->reg_def_last,
+ size * sizeof (struct ref *));
/* Zero the new entries. */
memset (df->regs + df->reg_size, 0,
@@ -356,67 +370,79 @@ df_reg_table_realloc (struct df *df, int size)
/* Allocate bitmaps for each basic block. */
+
static void
-df_bitmaps_alloc (struct df *df, int flags)
+df_bitmaps_alloc (struct df *df, bitmap blocks, int flags)
{
- int dflags = 0;
basic_block bb;
- /* Free the bitmaps if they need resizing. */
- if ((flags & DF_LR) && df->n_regs < (unsigned int) max_reg_num ())
- dflags |= DF_LR | DF_RU;
- if ((flags & DF_RU) && df->n_uses < df->use_id)
- dflags |= DF_RU;
- if ((flags & DF_RD) && df->n_defs < df->def_id)
- dflags |= DF_RD;
-
- if (dflags)
- df_bitmaps_free (df, dflags);
-
df->n_defs = df->def_id;
df->n_uses = df->use_id;
- FOR_EACH_BB (bb)
+ if (!blocks)
+ blocks = df->all_blocks;
+
+ FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
{
struct bb_info *bb_info = DF_BB_INFO (df, bb);
- if (flags & DF_RD && ! bb_info->rd_in)
+ if (flags & DF_RD)
{
- /* Allocate bitmaps for reaching definitions. */
- bb_info->rd_kill = BITMAP_XMALLOC ();
- bitmap_zero (bb_info->rd_kill);
- bb_info->rd_gen = BITMAP_XMALLOC ();
- bitmap_zero (bb_info->rd_gen);
- bb_info->rd_in = BITMAP_XMALLOC ();
- bb_info->rd_out = BITMAP_XMALLOC ();
- bb_info->rd_valid = 0;
+ if (!bb_info->rd_in)
+ {
+ /* Allocate bitmaps for reaching definitions. */
+ bb_info->rd_kill = BITMAP_XMALLOC ();
+ bb_info->rd_gen = BITMAP_XMALLOC ();
+ bb_info->rd_in = BITMAP_XMALLOC ();
+ bb_info->rd_out = BITMAP_XMALLOC ();
+ }
+ else
+ {
+ bitmap_clear (bb_info->rd_kill);
+ bitmap_clear (bb_info->rd_gen);
+ bitmap_clear (bb_info->rd_in);
+ bitmap_clear (bb_info->rd_out);
+ }
}
- if (flags & DF_RU && ! bb_info->ru_in)
+ if (flags & DF_RU)
{
- /* Allocate bitmaps for upward exposed uses. */
- bb_info->ru_kill = BITMAP_XMALLOC ();
- bitmap_zero (bb_info->ru_kill);
- /* Note the lack of symmetry. */
- bb_info->ru_gen = BITMAP_XMALLOC ();
- bitmap_zero (bb_info->ru_gen);
- bb_info->ru_in = BITMAP_XMALLOC ();
- bb_info->ru_out = BITMAP_XMALLOC ();
- bb_info->ru_valid = 0;
+ if (!bb_info->ru_in)
+ {
+ /* Allocate bitmaps for upward exposed uses. */
+ bb_info->ru_kill = BITMAP_XMALLOC ();
+ bb_info->ru_gen = BITMAP_XMALLOC ();
+ bb_info->ru_in = BITMAP_XMALLOC ();
+ bb_info->ru_out = BITMAP_XMALLOC ();
+ }
+ else
+ {
+ bitmap_clear (bb_info->ru_kill);
+ bitmap_clear (bb_info->ru_gen);
+ bitmap_clear (bb_info->ru_in);
+ bitmap_clear (bb_info->ru_out);
+ }
}
- if (flags & DF_LR && ! bb_info->lr_in)
+ if (flags & DF_LR)
{
- /* Allocate bitmaps for live variables. */
- bb_info->lr_def = BITMAP_XMALLOC ();
- bitmap_zero (bb_info->lr_def);
- bb_info->lr_use = BITMAP_XMALLOC ();
- bitmap_zero (bb_info->lr_use);
- bb_info->lr_in = BITMAP_XMALLOC ();
- bb_info->lr_out = BITMAP_XMALLOC ();
- bb_info->lr_valid = 0;
+ if (!bb_info->lr_in)
+ {
+ /* Allocate bitmaps for live variables. */
+ bb_info->lr_def = BITMAP_XMALLOC ();
+ bb_info->lr_use = BITMAP_XMALLOC ();
+ bb_info->lr_in = BITMAP_XMALLOC ();
+ bb_info->lr_out = BITMAP_XMALLOC ();
+ }
+ else
+ {
+ bitmap_clear (bb_info->lr_def);
+ bitmap_clear (bb_info->lr_use);
+ bitmap_clear (bb_info->lr_in);
+ bitmap_clear (bb_info->lr_out);
+ }
}
- }
+ });
}
@@ -507,8 +533,6 @@ df_alloc (struct df *df, int n_regs)
df->n_bbs = last_basic_block;
/* Allocate temporary working array used during local dataflow analysis. */
- df->reg_def_last = xmalloc (df->n_regs * sizeof (struct ref *));
-
df_insn_table_realloc (df, n_insns);
df_reg_table_realloc (df, df->n_regs);
@@ -571,7 +595,6 @@ df_free (struct df *df)
free_alloc_pool (df_ref_pool);
free_alloc_pool (df_link_pool);
-
}
/* Local miscellaneous routines. */
@@ -587,19 +610,6 @@ static rtx df_reg_use_gen (unsigned int regno)
use = gen_rtx_USE (GET_MODE (reg), reg);
return use;
}
-
-
-/* Return a CLOBBER for register REGNO. */
-static rtx df_reg_clobber_gen (unsigned int regno)
-{
- rtx reg;
- rtx use;
-
- reg = regno_reg_rtx[regno];
-
- use = gen_rtx_CLOBBER (GET_MODE (reg), reg);
- return use;
-}
/* Local chain manipulation routines. */
@@ -615,6 +625,21 @@ df_link_create (struct ref *ref, struct df_link *next)
return link;
}
+/* Releases members of the CHAIN. */
+
+static void
+free_reg_ref_chain (struct df_link **chain)
+{
+ struct df_link *act, *next;
+
+ for (act = *chain; act; act = next)
+ {
+ next = act->next;
+ pool_free (df_link_pool, act);
+ }
+
+ *chain = NULL;
+}
/* Add REF to chain head pointed to by PHEAD. */
static struct df_link *
@@ -629,8 +654,8 @@ df_ref_unlink (struct df_link **phead, struct ref *ref)
/* Only a single ref. It must be the one we want.
If not, the def-use and use-def chains are likely to
be inconsistent. */
- if (link->ref != ref)
- abort ();
+ gcc_assert (link->ref == ref);
+
/* Now have an empty chain. */
*phead = NULL;
}
@@ -778,6 +803,7 @@ df_ref_create (struct df *df, rtx reg, rtx *loc, rtx insn,
DF_REF_CHAIN (this_ref) = 0;
DF_REF_TYPE (this_ref) = ref_type;
DF_REF_FLAGS (this_ref) = ref_flags;
+ DF_REF_DATA (this_ref) = NULL;
if (ref_type == DF_REF_REG_DEF || ref_type == DF_REF_REG_CLOBBER)
{
@@ -825,8 +851,7 @@ df_ref_record (struct df *df, rtx reg, rtx *loc, rtx insn,
{
unsigned int regno;
- if (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG)
- abort ();
+ gcc_assert (REG_P (reg) || GET_CODE (reg) == SUBREG);
/* For the reg allocator we are interested in some SUBREG rtx's, but not
all. Notably only those representing a word extraction from a multi-word
@@ -858,7 +883,7 @@ df_ref_record (struct df *df, rtx reg, rtx *loc, rtx insn,
are really referenced. E.g., a (subreg:SI (reg:DI 0) 0) does _not_
reference the whole reg 0 in DI mode (which would also include
reg 1, at least, if 0 and 1 are SImode registers). */
- endregno = HARD_REGNO_NREGS (regno, GET_MODE (reg));
+ endregno = hard_regno_nregs[regno][GET_MODE (reg)];
if (GET_CODE (reg) == SUBREG)
regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
SUBREG_BYTE (reg), GET_MODE (reg));
@@ -953,8 +978,8 @@ df_def_record_1 (struct df *df, rtx x, basic_block bb, rtx insn)
flags |= DF_REF_READ_WRITE;
}
- if (GET_CODE (dst) == REG
- || (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG))
+ if (REG_P (dst)
+ || (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst))))
df_ref_record (df, dst, loc, insn,
GET_CODE (x) == CLOBBER
? DF_REF_REG_CLOBBER : DF_REF_REG_DEF,
@@ -1017,7 +1042,7 @@ df_uses_record (struct df *df, rtx *loc, enum df_ref_type ref_type,
case CLOBBER:
/* If we are clobbering a MEM, mark any registers inside the address
as being used. */
- if (GET_CODE (XEXP (x, 0)) == MEM)
+ if (MEM_P (XEXP (x, 0)))
df_uses_record (df, &XEXP (XEXP (x, 0), 0),
DF_REF_REG_MEM_STORE, bb, insn, flags);
@@ -1025,14 +1050,14 @@ df_uses_record (struct df *df, rtx *loc, enum df_ref_type ref_type,
return;
case MEM:
- df_uses_record (df, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn, flags);
+ df_uses_record (df, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn, 0);
return;
case SUBREG:
/* While we're here, optimize this case. */
/* In case the SUBREG is not of a REG, do not optimize. */
- if (GET_CODE (SUBREG_REG (x)) != REG)
+ if (!REG_P (SUBREG_REG (x)))
{
loc = &SUBREG_REG (x);
df_uses_record (df, loc, ref_type, bb, insn, flags);
@@ -1042,7 +1067,6 @@ df_uses_record (struct df *df, rtx *loc, enum df_ref_type ref_type,
/* ... Fall through ... */
case REG:
- /* See a REG (or SUBREG) other than being set. */
df_ref_record (df, x, loc, insn, ref_type, flags);
return;
@@ -1054,17 +1078,16 @@ df_uses_record (struct df *df, rtx *loc, enum df_ref_type ref_type,
switch (GET_CODE (dst))
{
- enum df_ref_flags use_flags;
case SUBREG:
if ((df->flags & DF_FOR_REGALLOC) == 0
&& read_modify_subreg_p (dst))
{
- use_flags = DF_REF_READ_WRITE | DF_REF_MODE_CHANGE;
df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
- insn, use_flags);
+ insn,
+ DF_REF_READ_WRITE | DF_REF_MODE_CHANGE);
break;
}
- /* ... FALLTHRU ... */
+ /* Fall through. */
case REG:
case PARALLEL:
case PC:
@@ -1076,13 +1099,12 @@ df_uses_record (struct df *df, rtx *loc, enum df_ref_type ref_type,
bb, insn, 0);
break;
case STRICT_LOW_PART:
- /* A strict_low_part uses the whole REG and not just the SUBREG. */
+ /* A strict_low_part uses the whole REG and not just the
+ SUBREG. */
dst = XEXP (dst, 0);
- if (GET_CODE (dst) != SUBREG)
- abort ();
- use_flags = DF_REF_READ_WRITE | DF_REF_MODE_CHANGE;
+ gcc_assert (GET_CODE (dst) == SUBREG);
df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
- insn, use_flags);
+ insn, DF_REF_READ_WRITE | DF_REF_MODE_CHANGE);
break;
case ZERO_EXTRACT:
case SIGN_EXTRACT:
@@ -1093,7 +1115,7 @@ df_uses_record (struct df *df, rtx *loc, enum df_ref_type ref_type,
dst = XEXP (dst, 0);
break;
default:
- abort ();
+ gcc_unreachable ();
}
return;
}
@@ -1209,7 +1231,7 @@ df_insn_refs_record (struct df *df, basic_block bb, rtx insn)
}
}
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
rtx note;
rtx x;
@@ -1245,20 +1267,15 @@ df_insn_refs_record (struct df *df, basic_block bb, rtx insn)
df_uses_record (df, &PATTERN (insn),
DF_REF_REG_USE, bb, insn, 0);
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
rtx note;
- if (0 && df->flags & DF_HARD_REGS)
- {
- /* Kill all registers invalidated by a call. */
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
- {
- rtx reg_clob = df_reg_clobber_gen (i);
- df_defs_record (df, reg_clob, bb, insn);
- }
- }
+ /* We do not record hard registers clobbered by the call,
+ since there are awfully many of them and "defs" created
+ through them are not interesting (since no use can be legally
+ reached by them). So we must just make sure we include them when
+ computing kill bitmaps. */
/* There may be extra registers to be clobbered. */
for (note = CALL_INSN_FUNCTION_USAGE (insn);
@@ -1278,15 +1295,13 @@ df_bb_refs_record (struct df *df, basic_block bb)
rtx insn;
/* Scan the block an insn at a time from beginning to end. */
- for (insn = bb->head; ; insn = NEXT_INSN (insn))
+ FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn))
{
/* Record defs within INSN. */
df_insn_refs_record (df, bb, insn);
}
- if (insn == bb->end)
- break;
}
}
@@ -1305,21 +1320,18 @@ df_refs_record (struct df *df, bitmap blocks)
/* Dataflow analysis routines. */
-
/* Create reg-def chains for basic block BB. These are a list of
definitions for each register. */
+
static void
df_bb_reg_def_chain_create (struct df *df, basic_block bb)
{
rtx insn;
/* Perhaps the defs should be sorted using a depth first search
- of the CFG (or possibly a breadth first search). We currently
- scan the basic blocks in reverse order so that the first defs
- appear at the start of the chain. */
+ of the CFG (or possibly a breadth first search). */
- for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
- insn = PREV_INSN (insn))
+ FOR_BB_INSNS_REVERSE (bb, insn)
{
struct df_link *link;
unsigned int uid = INSN_UID (insn);
@@ -1339,29 +1351,58 @@ df_bb_reg_def_chain_create (struct df *df, basic_block bb)
if (DF_REF_ID (def) < df->def_id_save)
continue;
- df->regs[dregno].defs
- = df_link_create (def, df->regs[dregno].defs);
+ df->regs[dregno].defs = df_link_create (def, df->regs[dregno].defs);
}
}
}
/* Create reg-def chains for each basic block within BLOCKS. These
- are a list of definitions for each register. */
+ are a list of definitions for each register. If REDO is true, add
+ all defs, otherwise just add the new defs. */
+
static void
-df_reg_def_chain_create (struct df *df, bitmap blocks)
+df_reg_def_chain_create (struct df *df, bitmap blocks, bool redo)
{
basic_block bb;
+#ifdef ENABLE_CHECKING
+ unsigned regno;
+#endif
+ unsigned old_def_id_save = df->def_id_save;
+
+ if (redo)
+ {
+#ifdef ENABLE_CHECKING
+ for (regno = 0; regno < df->n_regs; regno++)
+ gcc_assert (!df->regs[regno].defs);
+#endif
+
+ /* Pretend that all defs are new. */
+ df->def_id_save = 0;
+ }
- FOR_EACH_BB_IN_BITMAP/*_REV*/ (blocks, 0, bb,
+ FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
{
df_bb_reg_def_chain_create (df, bb);
});
+
+ df->def_id_save = old_def_id_save;
}
+/* Remove all reg-def chains stored in the dataflow object DF. */
+
+static void
+df_reg_def_chain_clean (struct df *df)
+{
+ unsigned regno;
+
+ for (regno = 0; regno < df->n_regs; regno++)
+ free_reg_ref_chain (&df->regs[regno].defs);
+}
/* Create reg-use chains for basic block BB. These are a list of uses
for each register. */
+
static void
df_bb_reg_use_chain_create (struct df *df, basic_block bb)
{
@@ -1370,8 +1411,7 @@ df_bb_reg_use_chain_create (struct df *df, basic_block bb)
/* Scan in forward order so that the last uses appear at the start
of the chain. */
- for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
- insn = NEXT_INSN (insn))
+ FOR_BB_INSNS (bb, insn)
{
struct df_link *link;
unsigned int uid = INSN_UID (insn);
@@ -1399,18 +1439,47 @@ df_bb_reg_use_chain_create (struct df *df, basic_block bb)
/* Create reg-use chains for each basic block within BLOCKS. These
- are a list of uses for each register. */
+ are a list of uses for each register. If REDO is true, remove the
+ old reg-use chains first, otherwise just add new uses to them. */
+
static void
-df_reg_use_chain_create (struct df *df, bitmap blocks)
+df_reg_use_chain_create (struct df *df, bitmap blocks, bool redo)
{
basic_block bb;
+#ifdef ENABLE_CHECKING
+ unsigned regno;
+#endif
+ unsigned old_use_id_save = df->use_id_save;
+
+ if (redo)
+ {
+#ifdef ENABLE_CHECKING
+ for (regno = 0; regno < df->n_regs; regno++)
+ gcc_assert (!df->regs[regno].uses);
+#endif
+
+ /* Pretend that all uses are new. */
+ df->use_id_save = 0;
+ }
FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
{
df_bb_reg_use_chain_create (df, bb);
});
+
+ df->use_id_save = old_use_id_save;
}
+/* Remove all reg-use chains stored in the dataflow object DF. */
+
+static void
+df_reg_use_chain_clean (struct df *df)
+{
+ unsigned regno;
+
+ for (regno = 0; regno < df->n_regs; regno++)
+ free_reg_ref_chain (&df->regs[regno].uses);
+}
/* Create def-use chains from reaching use bitmaps for basic block BB. */
static void
@@ -1423,8 +1492,7 @@ df_bb_du_chain_create (struct df *df, basic_block bb, bitmap ru)
/* For each def in BB create a linked list (chain) of uses
reached from the def. */
- for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
- insn = PREV_INSN (insn))
+ FOR_BB_INSNS_REVERSE (bb, insn)
{
struct df_link *def_link;
struct df_link *use_link;
@@ -1500,8 +1568,7 @@ df_bb_ud_chain_create (struct df *df, basic_block bb)
/* For each use in BB create a linked list (chain) of defs
that reach the use. */
- for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
- insn = NEXT_INSN (insn))
+ FOR_BB_INSNS (bb, insn)
{
unsigned int uid = INSN_UID (insn);
struct df_link *use_link;
@@ -1577,8 +1644,8 @@ df_ud_chain_create (struct df *df, bitmap blocks)
static void
-df_rd_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, bitmap in,
- bitmap out, bitmap gen, bitmap kill,
+df_rd_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, void *in,
+ void *out, void *gen, void *kill,
void *data ATTRIBUTE_UNUSED)
{
*changed = bitmap_union_of_diff (out, gen, in, kill);
@@ -1586,8 +1653,8 @@ df_rd_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, bitmap in,
static void
-df_ru_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, bitmap in,
- bitmap out, bitmap gen, bitmap kill,
+df_ru_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, void *in,
+ void *out, void *gen, void *kill,
void *data ATTRIBUTE_UNUSED)
{
*changed = bitmap_union_of_diff (in, gen, out, kill);
@@ -1595,8 +1662,8 @@ df_ru_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, bitmap in,
static void
-df_lr_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, bitmap in,
- bitmap out, bitmap use, bitmap def,
+df_lr_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, void *in,
+ void *out, void *use, void *def,
void *data ATTRIBUTE_UNUSED)
{
*changed = bitmap_union_of_diff (in, use, out, def);
@@ -1605,13 +1672,14 @@ df_lr_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, bitmap in,
/* Compute local reaching def info for basic block BB. */
static void
-df_bb_rd_local_compute (struct df *df, basic_block bb)
+df_bb_rd_local_compute (struct df *df, basic_block bb, bitmap call_killed_defs)
{
struct bb_info *bb_info = DF_BB_INFO (df, bb);
rtx insn;
+ bitmap seen = BITMAP_XMALLOC ();
+ bool call_seen = false;
- for (insn = bb->head; insn && insn != NEXT_INSN (bb->end);
- insn = NEXT_INSN (insn))
+ FOR_BB_INSNS_REVERSE (bb, insn)
{
unsigned int uid = INSN_UID (insn);
struct df_link *def_link;
@@ -1625,6 +1693,12 @@ df_bb_rd_local_compute (struct df *df, basic_block bb)
unsigned int regno = DF_REF_REGNO (def);
struct df_link *def2_link;
+ if (bitmap_bit_p (seen, regno)
+ || (call_seen
+ && regno < FIRST_PSEUDO_REGISTER
+ && TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)))
+ continue;
+
for (def2_link = df->regs[regno].defs; def2_link;
def2_link = def2_link->next)
{
@@ -1635,16 +1709,21 @@ df_bb_rd_local_compute (struct df *df, basic_block bb)
be killed by this BB but it keeps things a lot
simpler. */
bitmap_set_bit (bb_info->rd_kill, DF_REF_ID (def2));
-
- /* Zap from the set of gens for this BB. */
- bitmap_clear_bit (bb_info->rd_gen, DF_REF_ID (def2));
}
bitmap_set_bit (bb_info->rd_gen, DF_REF_ID (def));
+ bitmap_set_bit (seen, regno);
+ }
+
+ if (CALL_P (insn) && (df->flags & DF_HARD_REGS))
+ {
+ bitmap_operation (bb_info->rd_kill, bb_info->rd_kill,
+ call_killed_defs, BITMAP_IOR);
+ call_seen = 1;
}
}
- bb_info->rd_valid = 1;
+ BITMAP_XFREE (seen);
}
@@ -1653,11 +1732,32 @@ static void
df_rd_local_compute (struct df *df, bitmap blocks)
{
basic_block bb;
+ bitmap killed_by_call = NULL;
+ unsigned regno;
+ struct df_link *def_link;
+
+ if (df->flags & DF_HARD_REGS)
+ {
+ killed_by_call = BITMAP_XMALLOC ();
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ {
+ if (!TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
+ continue;
+
+ for (def_link = df->regs[regno].defs;
+ def_link;
+ def_link = def_link->next)
+ bitmap_set_bit (killed_by_call, DF_REF_ID (def_link->ref));
+ }
+ }
FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
{
- df_bb_rd_local_compute (df, bb);
+ df_bb_rd_local_compute (df, bb, killed_by_call);
});
+
+ if (df->flags & DF_HARD_REGS)
+ BITMAP_XFREE (killed_by_call);
}
@@ -1674,8 +1774,7 @@ df_bb_ru_local_compute (struct df *df, basic_block bb)
rtx insn;
- for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
- insn = PREV_INSN (insn))
+ FOR_BB_INSNS_REVERSE (bb, insn)
{
unsigned int uid = INSN_UID (insn);
struct df_link *def_link;
@@ -1712,7 +1811,6 @@ df_bb_ru_local_compute (struct df *df, basic_block bb)
bitmap_set_bit (bb_info->ru_gen, DF_REF_ID (use));
}
}
- bb_info->ru_valid = 1;
}
@@ -1737,8 +1835,7 @@ df_bb_lr_local_compute (struct df *df, basic_block bb)
struct bb_info *bb_info = DF_BB_INFO (df, bb);
rtx insn;
- for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
- insn = PREV_INSN (insn))
+ FOR_BB_INSNS_REVERSE (bb, insn)
{
unsigned int uid = INSN_UID (insn);
struct df_link *link;
@@ -1764,7 +1861,6 @@ df_bb_lr_local_compute (struct df *df, basic_block bb)
bitmap_set_bit (bb_info->lr_use, DF_REF_REGNO (use));
}
}
- bb_info->lr_valid = 1;
}
@@ -1792,12 +1888,12 @@ df_bb_reg_info_compute (struct df *df, basic_block bb, bitmap live)
bitmap_copy (live, bb_info->lr_out);
- for (insn = bb->end; insn && insn != PREV_INSN (bb->head);
- insn = PREV_INSN (insn))
+ FOR_BB_INSNS_REVERSE (bb, insn)
{
unsigned int uid = INSN_UID (insn);
unsigned int regno;
struct df_link *link;
+ bitmap_iterator bi;
if (! INSN_P (insn))
continue;
@@ -1823,10 +1919,10 @@ df_bb_reg_info_compute (struct df *df, basic_block bb, bitmap live)
}
/* Increment lifetimes of all live registers. */
- EXECUTE_IF_SET_IN_BITMAP (live, 0, regno,
- {
- reg_info[regno].lifetime++;
- });
+ EXECUTE_IF_SET_IN_BITMAP (live, 0, regno, bi)
+ {
+ reg_info[regno].lifetime++;
+ }
}
}
@@ -1858,14 +1954,11 @@ df_bb_luids_set (struct df *df, basic_block bb)
/* The LUIDs are monotonically increasing for each basic block. */
- for (insn = bb->head; ; insn = NEXT_INSN (insn))
+ FOR_BB_INSNS (bb, insn)
{
if (INSN_P (insn))
DF_INSN_LUID (df, insn) = luid++;
DF_INSN_LUID (df, insn) = luid;
-
- if (insn == bb->end)
- break;
}
return luid;
}
@@ -1889,12 +1982,13 @@ df_luids_set (struct df *df, bitmap blocks)
/* Perform dataflow analysis using existing DF structure for blocks
within BLOCKS. If BLOCKS is zero, use all basic blocks in the CFG. */
static void
-df_analyse_1 (struct df *df, bitmap blocks, int flags, int update)
+df_analyze_1 (struct df *df, bitmap blocks, int flags, int update)
{
int aflags;
int dflags;
int i;
basic_block bb;
+ struct dataflow dflow;
dflags = 0;
aflags = flags;
@@ -1916,7 +2010,7 @@ df_analyse_1 (struct df *df, bitmap blocks, int flags, int update)
df->flags = flags;
if (update)
{
- df_refs_update (df);
+ df_refs_update (df, NULL);
/* More fine grained incremental dataflow analysis would be
nice. For now recompute the whole shebang for the
modified blocks. */
@@ -1940,7 +2034,7 @@ df_analyse_1 (struct df *df, bitmap blocks, int flags, int update)
/* Allocate the bitmaps now the total number of defs and uses are
known. If the number of defs or uses have changed, then
these bitmaps need to be reallocated. */
- df_bitmaps_alloc (df, aflags);
+ df_bitmaps_alloc (df, NULL, aflags);
/* Set the LUIDs for each specified basic block. */
df_luids_set (df, blocks);
@@ -1951,12 +2045,12 @@ df_analyse_1 (struct df *df, bitmap blocks, int flags, int update)
regs local to a basic block as it speeds up searching. */
if (aflags & DF_RD_CHAIN)
{
- df_reg_def_chain_create (df, blocks);
+ df_reg_def_chain_create (df, blocks, false);
}
if (aflags & DF_RU_CHAIN)
{
- df_reg_use_chain_create (df, blocks);
+ df_reg_use_chain_create (df, blocks, false);
}
df->dfs_order = xmalloc (sizeof (int) * n_basic_blocks);
@@ -1977,27 +2071,33 @@ df_analyse_1 (struct df *df, bitmap blocks, int flags, int update)
if (aflags & DF_RD)
{
/* Compute the sets of gens and kills for the defs of each bb. */
+ dflow.in = xmalloc (sizeof (bitmap) * last_basic_block);
+ dflow.out = xmalloc (sizeof (bitmap) * last_basic_block);
+ dflow.gen = xmalloc (sizeof (bitmap) * last_basic_block);
+ dflow.kill = xmalloc (sizeof (bitmap) * last_basic_block);
+
df_rd_local_compute (df, df->flags & DF_RD ? blocks : df->all_blocks);
- {
- bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
- FOR_EACH_BB (bb)
- {
- in[bb->index] = DF_BB_INFO (df, bb)->rd_in;
- out[bb->index] = DF_BB_INFO (df, bb)->rd_out;
- gen[bb->index] = DF_BB_INFO (df, bb)->rd_gen;
- kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill;
- }
- iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
- DF_FORWARD, DF_UNION, df_rd_transfer_function,
- df->inverse_rc_map, NULL);
- free (in);
- free (out);
- free (gen);
- free (kill);
- }
+ FOR_EACH_BB (bb)
+ {
+ dflow.in[bb->index] = DF_BB_INFO (df, bb)->rd_in;
+ dflow.out[bb->index] = DF_BB_INFO (df, bb)->rd_out;
+ dflow.gen[bb->index] = DF_BB_INFO (df, bb)->rd_gen;
+ dflow.kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill;
+ }
+
+ dflow.repr = SR_BITMAP;
+ dflow.dir = DF_FORWARD;
+ dflow.conf_op = DF_UNION;
+ dflow.transfun = df_rd_transfer_function;
+ dflow.n_blocks = n_basic_blocks;
+ dflow.order = df->rc_order;
+ dflow.data = NULL;
+
+ iterative_dataflow (&dflow);
+ free (dflow.in);
+ free (dflow.out);
+ free (dflow.gen);
+ free (dflow.kill);
}
if (aflags & DF_UD_CHAIN)
@@ -2013,27 +2113,34 @@ df_analyse_1 (struct df *df, bitmap blocks, int flags, int update)
{
/* Compute the sets of gens and kills for the upwards exposed
uses in each bb. */
+ dflow.in = xmalloc (sizeof (bitmap) * last_basic_block);
+ dflow.out = xmalloc (sizeof (bitmap) * last_basic_block);
+ dflow.gen = xmalloc (sizeof (bitmap) * last_basic_block);
+ dflow.kill = xmalloc (sizeof (bitmap) * last_basic_block);
+
df_ru_local_compute (df, df->flags & DF_RU ? blocks : df->all_blocks);
- {
- bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
- FOR_EACH_BB (bb)
- {
- in[bb->index] = DF_BB_INFO (df, bb)->ru_in;
- out[bb->index] = DF_BB_INFO (df, bb)->ru_out;
- gen[bb->index] = DF_BB_INFO (df, bb)->ru_gen;
- kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill;
- }
- iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
- DF_BACKWARD, DF_UNION, df_ru_transfer_function,
- df->inverse_rts_map, NULL);
- free (in);
- free (out);
- free (gen);
- free (kill);
- }
+
+ FOR_EACH_BB (bb)
+ {
+ dflow.in[bb->index] = DF_BB_INFO (df, bb)->ru_in;
+ dflow.out[bb->index] = DF_BB_INFO (df, bb)->ru_out;
+ dflow.gen[bb->index] = DF_BB_INFO (df, bb)->ru_gen;
+ dflow.kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill;
+ }
+
+ dflow.repr = SR_BITMAP;
+ dflow.dir = DF_BACKWARD;
+ dflow.conf_op = DF_UNION;
+ dflow.transfun = df_ru_transfer_function;
+ dflow.n_blocks = n_basic_blocks;
+ dflow.order = df->rts_order;
+ dflow.data = NULL;
+
+ iterative_dataflow (&dflow);
+ free (dflow.in);
+ free (dflow.out);
+ free (dflow.gen);
+ free (dflow.kill);
}
if (aflags & DF_DU_CHAIN)
@@ -2052,27 +2159,34 @@ df_analyse_1 (struct df *df, bitmap blocks, int flags, int update)
if (aflags & DF_LR)
{
/* Compute the sets of defs and uses of live variables. */
+ dflow.in = xmalloc (sizeof (bitmap) * last_basic_block);
+ dflow.out = xmalloc (sizeof (bitmap) * last_basic_block);
+ dflow.gen = xmalloc (sizeof (bitmap) * last_basic_block);
+ dflow.kill = xmalloc (sizeof (bitmap) * last_basic_block);
+
df_lr_local_compute (df, df->flags & DF_LR ? blocks : df->all_blocks);
- {
- bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *use = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *def = xmalloc (sizeof (bitmap) * last_basic_block);
- FOR_EACH_BB (bb)
- {
- in[bb->index] = DF_BB_INFO (df, bb)->lr_in;
- out[bb->index] = DF_BB_INFO (df, bb)->lr_out;
- use[bb->index] = DF_BB_INFO (df, bb)->lr_use;
- def[bb->index] = DF_BB_INFO (df, bb)->lr_def;
- }
- iterative_dataflow_bitmap (in, out, use, def, df->all_blocks,
- DF_BACKWARD, DF_UNION, df_lr_transfer_function,
- df->inverse_rts_map, NULL);
- free (in);
- free (out);
- free (use);
- free (def);
- }
+
+ FOR_EACH_BB (bb)
+ {
+ dflow.in[bb->index] = DF_BB_INFO (df, bb)->lr_in;
+ dflow.out[bb->index] = DF_BB_INFO (df, bb)->lr_out;
+ dflow.gen[bb->index] = DF_BB_INFO (df, bb)->lr_use;
+ dflow.kill[bb->index] = DF_BB_INFO (df, bb)->lr_def;
+ }
+
+ dflow.repr = SR_BITMAP;
+ dflow.dir = DF_BACKWARD;
+ dflow.conf_op = DF_UNION;
+ dflow.transfun = df_lr_transfer_function;
+ dflow.n_blocks = n_basic_blocks;
+ dflow.order = df->rts_order;
+ dflow.data = NULL;
+
+ iterative_dataflow (&dflow);
+ free (dflow.in);
+ free (dflow.out);
+ free (dflow.gen);
+ free (dflow.kill);
}
if (aflags & DF_REG_INFO)
@@ -2159,7 +2273,7 @@ df_bb_refs_update (struct df *df, basic_block bb)
a bitmap for insns_modified saves memory and avoids queuing
duplicates. */
- for (insn = bb->head; ; insn = NEXT_INSN (insn))
+ FOR_BB_INSNS (bb, insn)
{
unsigned int uid;
@@ -2176,8 +2290,6 @@ df_bb_refs_update (struct df *df, basic_block bb)
count++;
}
- if (insn == bb->end)
- break;
}
return count;
}
@@ -2185,15 +2297,17 @@ df_bb_refs_update (struct df *df, basic_block bb)
/* Process all the modified/deleted insns that were queued. */
static int
-df_refs_update (struct df *df)
+df_refs_update (struct df *df, bitmap blocks)
{
basic_block bb;
bitmap b = BITMAP_XMALLOC ();
rtx insn;
- int count = 0;
+ int count = 0, bbno;
unsigned int uid;
+ bitmap_iterator bi;
- if ((unsigned int)max_reg_num () >= df->reg_size)
+ df->n_regs = max_reg_num ();
+ if (df->n_regs >= df->reg_size)
df_reg_table_realloc (df, 0);
df_refs_queue (df);
@@ -2203,13 +2317,23 @@ df_refs_update (struct df *df)
bitmap_copy (b, df->insns_modified);
for (insn = get_insns(); insn; insn = NEXT_INSN (insn))
bitmap_clear_bit (b, INSN_UID (insn));
- EXECUTE_IF_SET_IN_BITMAP (b, 0, uid,
- df_uid_refs_remove (df, uid););
+ EXECUTE_IF_SET_IN_BITMAP (b, 0, uid, bi)
+ df_uid_refs_remove (df, uid);
- FOR_EACH_BB_IN_BITMAP (df->bbs_modified, 0, bb,
+ if (!blocks)
{
- count += df_bb_refs_update (df, bb);
- });
+ FOR_EACH_BB_IN_BITMAP (df->bbs_modified, 0, bb,
+ {
+ count += df_bb_refs_update (df, bb);
+ });
+ }
+ else
+ {
+ EXECUTE_IF_AND_IN_BITMAP (df->bbs_modified, blocks, 0, bbno, bi)
+ {
+ count += df_bb_refs_update (df, BASIC_BLOCK (bbno));
+ }
+ }
BITMAP_XFREE (b);
@@ -2240,19 +2364,18 @@ df_modified_p (struct df *df, bitmap blocks)
return update;
}
-
/* Analyze dataflow info for the basic blocks specified by the bitmap
BLOCKS, or for the whole CFG if BLOCKS is zero, or just for the
modified blocks if BLOCKS is -1. */
+
int
-df_analyse (struct df *df, bitmap blocks, int flags)
+df_analyze (struct df *df, bitmap blocks, int flags)
{
int update;
/* We could deal with additional basic blocks being created by
rescanning everything again. */
- if (df->n_bbs && df->n_bbs != (unsigned int) last_basic_block)
- abort ();
+ gcc_assert (!df->n_bbs || df->n_bbs == (unsigned int) last_basic_block);
update = df_modified_p (df, blocks);
if (update || (flags != df->flags))
@@ -2266,7 +2389,7 @@ df_analyse (struct df *df, bitmap blocks, int flags)
}
/* Allocate and initialize data structures. */
df_alloc (df, max_reg_num ());
- df_analyse_1 (df, 0, flags, 0);
+ df_analyze_1 (df, 0, flags, 0);
update = 1;
}
else
@@ -2274,10 +2397,9 @@ df_analyse (struct df *df, bitmap blocks, int flags)
if (blocks == (bitmap) -1)
blocks = df->bbs_modified;
- if (! df->n_bbs)
- abort ();
+ gcc_assert (df->n_bbs);
- df_analyse_1 (df, blocks, flags, 1);
+ df_analyze_1 (df, blocks, flags, 1);
bitmap_zero (df->bbs_modified);
bitmap_zero (df->insns_modified);
}
@@ -2285,6 +2407,220 @@ df_analyse (struct df *df, bitmap blocks, int flags)
return update;
}
+/* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
+ the order of the remaining entries. Returns the length of the resulting
+ list. */
+
+static unsigned
+prune_to_subcfg (int list[], unsigned len, bitmap blocks)
+{
+ unsigned act, last;
+
+ for (act = 0, last = 0; act < len; act++)
+ if (bitmap_bit_p (blocks, list[act]))
+ list[last++] = list[act];
+
+ return last;
+}
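/* Example: with list = {3, 1, 4, 2}, len = 4 and BLOCKS = {1, 2}, the
   loop rewrites the array in place to begin with {1, 2} and returns 2,
   keeping the surviving block indices in their original order.  */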
+
+/* Alternative entry point to the analysis. Analyze just the part of the cfg
+ graph induced by BLOCKS.
+
+ TODO I am not quite sure how to avoid code duplication with df_analyze_1
+ here, and simultaneously not make even greater chaos in it. We behave
+ slightly differently in some details, especially in handling modified
+ insns. */
+
+void
+df_analyze_subcfg (struct df *df, bitmap blocks, int flags)
+{
+ rtx insn;
+ basic_block bb;
+ struct dataflow dflow;
+ unsigned n_blocks;
+
+ if (flags & DF_UD_CHAIN)
+ flags |= DF_RD | DF_RD_CHAIN;
+ if (flags & DF_DU_CHAIN)
+ flags |= DF_RU;
+ if (flags & DF_RU)
+ flags |= DF_RU_CHAIN;
+ if (flags & DF_REG_INFO)
+ flags |= DF_LR;
+
+ if (!df->n_bbs)
+ {
+ df_alloc (df, max_reg_num ());
+
+ /* Mark all insns as modified. */
+
+ FOR_EACH_BB (bb)
+ {
+ FOR_BB_INSNS (bb, insn)
+ {
+ df_insn_modify (df, bb, insn);
+ }
+ }
+ }
+
+ df->flags = flags;
+
+ df_reg_def_chain_clean (df);
+ df_reg_use_chain_clean (df);
+
+ df_refs_update (df, blocks);
+
+ /* Clear the updated stuff from ``modified'' bitmaps. */
+ FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
+ {
+ if (bitmap_bit_p (df->bbs_modified, bb->index))
+ {
+ FOR_BB_INSNS (bb, insn)
+ {
+ bitmap_clear_bit (df->insns_modified, INSN_UID (insn));
+ }
+
+ bitmap_clear_bit (df->bbs_modified, bb->index);
+ }
+ });
+
+ /* Allocate the bitmaps now the total number of defs and uses are
+ known. If the number of defs or uses have changed, then
+ these bitmaps need to be reallocated. */
+ df_bitmaps_alloc (df, blocks, flags);
+
+ /* Set the LUIDs for each specified basic block. */
+ df_luids_set (df, blocks);
+
+ /* Recreate reg-def and reg-use chains from scratch so that first
+ def is at the head of the reg-def chain and the last use is at
+ the head of the reg-use chain. This is only important for
+ regs local to a basic block as it speeds up searching. */
+ if (flags & DF_RD_CHAIN)
+ {
+ df_reg_def_chain_create (df, blocks, true);
+ }
+
+ if (flags & DF_RU_CHAIN)
+ {
+ df_reg_use_chain_create (df, blocks, true);
+ }
+
+ df->dfs_order = xmalloc (sizeof (int) * n_basic_blocks);
+ df->rc_order = xmalloc (sizeof (int) * n_basic_blocks);
+ df->rts_order = xmalloc (sizeof (int) * n_basic_blocks);
+
+ flow_depth_first_order_compute (df->dfs_order, df->rc_order);
+ flow_reverse_top_sort_order_compute (df->rts_order);
+
+ n_blocks = prune_to_subcfg (df->dfs_order, n_basic_blocks, blocks);
+ prune_to_subcfg (df->rc_order, n_basic_blocks, blocks);
+ prune_to_subcfg (df->rts_order, n_basic_blocks, blocks);
+
+ dflow.in = xmalloc (sizeof (bitmap) * last_basic_block);
+ dflow.out = xmalloc (sizeof (bitmap) * last_basic_block);
+ dflow.gen = xmalloc (sizeof (bitmap) * last_basic_block);
+ dflow.kill = xmalloc (sizeof (bitmap) * last_basic_block);
+
+ if (flags & DF_RD)
+ {
+ /* Compute the sets of gens and kills for the defs of each bb. */
+ df_rd_local_compute (df, blocks);
+
+ FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
+ {
+ dflow.in[bb->index] = DF_BB_INFO (df, bb)->rd_in;
+ dflow.out[bb->index] = DF_BB_INFO (df, bb)->rd_out;
+ dflow.gen[bb->index] = DF_BB_INFO (df, bb)->rd_gen;
+ dflow.kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill;
+ });
+
+ dflow.repr = SR_BITMAP;
+ dflow.dir = DF_FORWARD;
+ dflow.conf_op = DF_UNION;
+ dflow.transfun = df_rd_transfer_function;
+ dflow.n_blocks = n_blocks;
+ dflow.order = df->rc_order;
+ dflow.data = NULL;
+
+ iterative_dataflow (&dflow);
+ }
+
+ if (flags & DF_UD_CHAIN)
+ {
+ /* Create use-def chains. */
+ df_ud_chain_create (df, blocks);
+ }
+
+ if (flags & DF_RU)
+ {
+ /* Compute the sets of gens and kills for the upwards exposed
+ uses in each bb. */
+ df_ru_local_compute (df, blocks);
+
+ FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
+ {
+ dflow.in[bb->index] = DF_BB_INFO (df, bb)->ru_in;
+ dflow.out[bb->index] = DF_BB_INFO (df, bb)->ru_out;
+ dflow.gen[bb->index] = DF_BB_INFO (df, bb)->ru_gen;
+ dflow.kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill;
+ });
+
+ dflow.repr = SR_BITMAP;
+ dflow.dir = DF_BACKWARD;
+ dflow.conf_op = DF_UNION;
+ dflow.transfun = df_ru_transfer_function;
+ dflow.n_blocks = n_blocks;
+ dflow.order = df->rts_order;
+ dflow.data = NULL;
+
+ iterative_dataflow (&dflow);
+ }
+
+ if (flags & DF_DU_CHAIN)
+ {
+ /* Create def-use chains. */
+ df_du_chain_create (df, blocks);
+ }
+
+ if (flags & DF_LR)
+ {
+ /* Compute the sets of defs and uses of live variables. */
+ df_lr_local_compute (df, blocks);
+
+ FOR_EACH_BB (bb)
+ {
+ dflow.in[bb->index] = DF_BB_INFO (df, bb)->lr_in;
+ dflow.out[bb->index] = DF_BB_INFO (df, bb)->lr_out;
+ dflow.gen[bb->index] = DF_BB_INFO (df, bb)->lr_use;
+ dflow.kill[bb->index] = DF_BB_INFO (df, bb)->lr_def;
+ }
+
+ dflow.repr = SR_BITMAP;
+ dflow.dir = DF_BACKWARD;
+ dflow.conf_op = DF_UNION;
+ dflow.transfun = df_lr_transfer_function;
+ dflow.n_blocks = n_blocks;
+ dflow.order = df->rts_order;
+ dflow.data = NULL;
+
+ iterative_dataflow (&dflow);
+ }
+
+ if (flags & DF_REG_INFO)
+ {
+ df_reg_info_compute (df, blocks);
+ }
+
+ free (dflow.in);
+ free (dflow.out);
+ free (dflow.gen);
+ free (dflow.kill);
+
+ free (df->dfs_order);
+ free (df->rc_order);
+ free (df->rts_order);
+}
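/* Caller-side sketch of the new entry point.  The predicate below is
   hypothetical and DF is assumed to come from the caller; the signature,
   flags and bitmap helpers are the ones appearing in this patch.  */
  bitmap blocks = BITMAP_XMALLOC ();
  basic_block bb;

  FOR_EACH_BB (bb)
    if (bb_of_interest_p (bb))		/* hypothetical predicate */
      bitmap_set_bit (blocks, bb->index);

  df_analyze_subcfg (df, blocks, DF_RD | DF_RD_CHAIN | DF_UD_CHAIN);
  /* ... walk the use-def chains, restricted to BLOCKS ...  */
  BITMAP_XFREE (blocks);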
/* Free all the dataflow info and the DF structure. */
void
@@ -2358,14 +2694,14 @@ df_bb_refs_unlink (struct df *df, basic_block bb)
rtx insn;
/* Scan the block an insn at a time from beginning to end. */
- for (insn = bb->head; ; insn = NEXT_INSN (insn))
+ for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
{
if (INSN_P (insn))
{
/* Unlink refs for INSN. */
df_insn_refs_unlink (df, bb, insn);
}
- if (insn == bb->end)
+ if (insn == BB_END (bb))
break;
}
}
@@ -2404,8 +2740,7 @@ df_insn_delete (struct df *df, basic_block bb ATTRIBUTE_UNUSED, rtx insn)
handle the JUMP_LABEL? */
/* We should not be deleting the NOTE_INSN_BASIC_BLOCK or label. */
- if (insn == bb->head)
- abort ();
+ gcc_assert (insn != BB_HEAD (bb));
/* Delete the insn. */
delete_insn (insn);
@@ -2415,6 +2750,16 @@ df_insn_delete (struct df *df, basic_block bb ATTRIBUTE_UNUSED, rtx insn)
return NEXT_INSN (insn);
}
+/* Mark that basic block BB was modified. */
+
+static void
+df_bb_modify (struct df *df, basic_block bb)
+{
+ if ((unsigned) bb->index >= df->n_bbs)
+ df_bb_table_realloc (df, df->n_bbs);
+
+ bitmap_set_bit (df->bbs_modified, bb->index);
+}
/* Mark that INSN within BB may have changed (created/modified/deleted).
This may be called multiple times for the same insn. There is no
@@ -2429,7 +2774,7 @@ df_insn_modify (struct df *df, basic_block bb, rtx insn)
if (uid >= df->insn_size)
df_insn_table_realloc (df, uid);
- bitmap_set_bit (df->bbs_modified, bb->index);
+ df_bb_modify (df, bb);
bitmap_set_bit (df->insns_modified, uid);
/* For incremental updating on the fly, perhaps we could make a copy
@@ -2439,7 +2784,6 @@ df_insn_modify (struct df *df, basic_block bb, rtx insn)
will just get ignored. */
}
-
typedef struct replace_args
{
rtx match;
@@ -2509,7 +2853,7 @@ df_insn_mem_replace (struct df *df, basic_block bb, rtx insn, rtx mem, rtx reg)
in INSN. REG should be a new pseudo so it won't affect the
dataflow information that we currently have. We should add
the new uses and defs to INSN and then recreate the chains
- when df_analyse is called. */
+ when df_analyze is called. */
return args.modified;
}
@@ -2560,24 +2904,17 @@ df_refs_reg_replace (struct df *df, bitmap blocks, struct df_link *chain, rtx ol
if (! INSN_P (insn))
continue;
- if (bitmap_bit_p (blocks, DF_REF_BBNO (ref)))
- {
- df_ref_reg_replace (df, ref, oldreg, newreg);
+ gcc_assert (bitmap_bit_p (blocks, DF_REF_BBNO (ref)));
+
+ df_ref_reg_replace (df, ref, oldreg, newreg);
- /* Replace occurrences of the reg within the REG_NOTES. */
- if ((! link->next || DF_REF_INSN (ref)
- != DF_REF_INSN (link->next->ref))
- && REG_NOTES (insn))
- {
- args.insn = insn;
- for_each_rtx (&REG_NOTES (insn), df_rtx_reg_replace, &args);
- }
- }
- else
+ /* Replace occurrences of the reg within the REG_NOTES. */
+ if ((! link->next || DF_REF_INSN (ref)
+ != DF_REF_INSN (link->next->ref))
+ && REG_NOTES (insn))
{
- /* Temporary check to ensure that we have a grip on which
- regs should be replaced. */
- abort ();
+ args.insn = insn;
+ for_each_rtx (&REG_NOTES (insn), df_rtx_reg_replace, &args);
}
}
}
@@ -2608,8 +2945,7 @@ df_ref_reg_replace (struct df *df, struct ref *ref, rtx oldreg, rtx newreg)
if (! INSN_P (DF_REF_INSN (ref)))
return 0;
- if (oldreg && oldreg != DF_REF_REG (ref))
- abort ();
+ gcc_assert (!oldreg || oldreg == DF_REF_REG (ref));
if (! validate_change (DF_REF_INSN (ref), DF_REF_LOC (ref), newreg, 1))
return 0;
@@ -2675,10 +3011,8 @@ df_insns_modify (struct df *df, basic_block bb, rtx first_insn, rtx last_insn)
/* A non-const call should not have slipped through the net. If
it does, we need to create a new basic block. Ouch. The
same applies for a label. */
- if ((GET_CODE (insn) == CALL_INSN
- && ! CONST_OR_PURE_CALL_P (insn))
- || GET_CODE (insn) == CODE_LABEL)
- abort ();
+ gcc_assert ((!CALL_P (insn) || CONST_OR_PURE_CALL_P (insn))
+ && !LABEL_P (insn));
uid = INSN_UID (insn);
@@ -2701,8 +3035,7 @@ df_pattern_emit_before (struct df *df, rtx pattern, basic_block bb, rtx insn)
rtx prev_insn = PREV_INSN (insn);
/* We should not be inserting before the start of the block. */
- if (insn == bb->head)
- abort ();
+ gcc_assert (insn != BB_HEAD (bb));
ret_insn = emit_insn_before (pattern, insn);
if (ret_insn == insn)
return ret_insn;
@@ -2769,7 +3102,7 @@ df_insn_move_before (struct df *df, basic_block bb, rtx insn, basic_block before
are likely to be increased. */
/* ???? Perhaps all the insns moved should be stored on a list
- which df_analyse removes when it recalculates data flow. */
+ which df_analyze removes when it recalculates data flow. */
return emit_insn_before (insn, before_insn);
}
@@ -2797,6 +3130,34 @@ df_insn_regno_def_p (struct df *df, basic_block bb ATTRIBUTE_UNUSED,
return 0;
}
+/* Finds the reference corresponding to the definition of REG in INSN.
+ DF is the dataflow object. */
+
+struct ref *
+df_find_def (struct df *df, rtx insn, rtx reg)
+{
+ struct df_link *defs;
+
+ for (defs = DF_INSN_DEFS (df, insn); defs; defs = defs->next)
+ if (rtx_equal_p (DF_REF_REG (defs->ref), reg))
+ return defs->ref;
+
+ return NULL;
+}
+
+/* Return 1 if REG is referenced in INSN, zero otherwise. */
+
+int
+df_reg_used (struct df *df, rtx insn, rtx reg)
+{
+ struct df_link *uses;
+
+ for (uses = DF_INSN_USES (df, insn); uses; uses = uses->next)
+ if (rtx_equal_p (DF_REF_REG (uses->ref), reg))
+ return 1;
+
+ return 0;
+}
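/* The two lookups above are handy together; e.g. a read-modify-write
   test might look like this (a small sketch; DF, INSN and REG are
   assumed to come from the caller).  */
  struct ref *def = df_find_def (df, insn, reg);

  if (def && df_reg_used (df, insn, reg))
    {
      /* INSN both reads and writes REG (e.g. reg = reg + 1).  */
    }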
static int
df_def_dominates_all_uses_p (struct df *df ATTRIBUTE_UNUSED, struct ref *def)
@@ -2932,10 +3293,7 @@ df_bb_reg_live_start_p (struct df *df, basic_block bb, rtx reg)
{
struct bb_info *bb_info = DF_BB_INFO (df, bb);
-#ifdef ENABLE_CHECKING
- if (! bb_info->lr_in)
- abort ();
-#endif
+ gcc_assert (bb_info->lr_in);
return bitmap_bit_p (bb_info->lr_in, REGNO (reg));
}
@@ -2947,10 +3305,7 @@ df_bb_reg_live_end_p (struct df *df, basic_block bb, rtx reg)
{
struct bb_info *bb_info = DF_BB_INFO (df, bb);
-#ifdef ENABLE_CHECKING
- if (! bb_info->lr_in)
- abort ();
-#endif
+ gcc_assert (bb_info->lr_in);
return bitmap_bit_p (bb_info->lr_out, REGNO (reg));
}
@@ -2970,9 +3325,8 @@ df_bb_regs_lives_compare (struct df *df, basic_block bb, rtx reg1, rtx reg2)
/* The regs must be local to BB. */
- if (df_regno_bb (df, regno1) != bb
- || df_regno_bb (df, regno2) != bb)
- abort ();
+ gcc_assert (df_regno_bb (df, regno1) == bb
+ && df_regno_bb (df, regno2) == bb);
def2 = df_bb_regno_first_def_find (df, bb, regno2);
use1 = df_bb_regno_last_use_find (df, bb, regno1);
@@ -2993,7 +3347,7 @@ df_bb_regs_lives_compare (struct df *df, basic_block bb, rtx reg1, rtx reg2)
/* Return last use of REGNO within BB. */
-static struct ref *
+struct ref *
df_bb_regno_last_use_find (struct df *df, basic_block bb, unsigned int regno)
{
struct df_link *link;
@@ -3014,7 +3368,7 @@ df_bb_regno_last_use_find (struct df *df, basic_block bb, unsigned int regno)
/* Return first def of REGNO within BB. */
-static struct ref *
+struct ref *
df_bb_regno_first_def_find (struct df *df, basic_block bb, unsigned int regno)
{
struct df_link *link;
@@ -3033,6 +3387,31 @@ df_bb_regno_first_def_find (struct df *df, basic_block bb, unsigned int regno)
return 0;
}
+/* Return last def of REGNO within BB. */
+struct ref *
+df_bb_regno_last_def_find (struct df *df, basic_block bb, unsigned int regno)
+{
+ struct df_link *link;
+ struct ref *last_def = NULL;
+ int in_bb = 0;
+
+ /* This assumes that the reg-def list is ordered such that for any
+ BB, the first def is found first. However, since the BBs are not
+ ordered, the first def in the chain is not necessarily the first
+ def in the function. */
+ for (link = df->regs[regno].defs; link; link = link->next)
+ {
+ struct ref *def = link->ref;
+ /* The first time in the desired block. */
+ if (DF_REF_BB (def) == bb)
+ in_bb = 1;
+ /* The last def in the desired block. */
+ else if (in_bb)
+ return last_def;
+ last_def = def;
+ }
+ return last_def;
+}
/* Return first use of REGNO inside INSN within BB. */
static struct ref *
@@ -3091,8 +3470,7 @@ df_bb_single_def_use_insn_find (struct df *df, basic_block bb, rtx insn, rtx reg
def = df_bb_insn_regno_first_def_find (df, bb, insn, REGNO (reg));
- if (! def)
- abort ();
+ gcc_assert (def);
du_link = DF_REF_CHAIN (def);
@@ -3453,354 +3831,193 @@ debug_df_chain (struct df_link *link)
}
-/* Hybrid search algorithm from "Implementation Techniques for
- Efficient Data-Flow Analysis of Large Programs". */
static void
-hybrid_search_bitmap (basic_block block, bitmap *in, bitmap *out, bitmap *gen,
- bitmap *kill, enum df_flow_dir dir,
- enum df_confluence_op conf_op,
- transfer_function_bitmap transfun, sbitmap visited,
- sbitmap pending, void *data)
+dataflow_set_a_op_b (enum set_representation repr,
+ enum df_confluence_op op,
+ void *rslt, void *op1, void *op2)
{
- int changed;
- int i = block->index;
- edge e;
- basic_block bb = block;
-
- SET_BIT (visited, block->index);
- if (TEST_BIT (pending, block->index))
+ switch (repr)
{
- if (dir == DF_FORWARD)
- {
- /* Calculate <conf_op> of predecessor_outs. */
- bitmap_zero (in[i]);
- for (e = bb->pred; e != 0; e = e->pred_next)
- {
- if (e->src == ENTRY_BLOCK_PTR)
- continue;
- switch (conf_op)
- {
- case DF_UNION:
- bitmap_a_or_b (in[i], in[i], out[e->src->index]);
- break;
- case DF_INTERSECTION:
- bitmap_a_and_b (in[i], in[i], out[e->src->index]);
- break;
- }
- }
- }
- else
- {
- /* Calculate <conf_op> of successor ins. */
- bitmap_zero (out[i]);
- for (e = bb->succ; e != 0; e = e->succ_next)
- {
- if (e->dest == EXIT_BLOCK_PTR)
- continue;
- switch (conf_op)
- {
- case DF_UNION:
- bitmap_a_or_b (out[i], out[i], in[e->dest->index]);
- break;
- case DF_INTERSECTION:
- bitmap_a_and_b (out[i], out[i], in[e->dest->index]);
- break;
- }
- }
- }
- /* Common part */
- (*transfun)(i, &changed, in[i], out[i], gen[i], kill[i], data);
- RESET_BIT (pending, i);
- if (changed)
+ case SR_SBITMAP:
+ switch (op)
{
- if (dir == DF_FORWARD)
- {
- for (e = bb->succ; e != 0; e = e->succ_next)
- {
- if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
- continue;
- SET_BIT (pending, e->dest->index);
- }
- }
- else
- {
- for (e = bb->pred; e != 0; e = e->pred_next)
- {
- if (e->src == ENTRY_BLOCK_PTR || e->dest->index == i)
- continue;
- SET_BIT (pending, e->src->index);
- }
- }
+ case DF_UNION:
+ sbitmap_a_or_b (rslt, op1, op2);
+ break;
+
+ case DF_INTERSECTION:
+ sbitmap_a_and_b (rslt, op1, op2);
+ break;
+
+ default:
+ gcc_unreachable ();
}
- }
- if (dir == DF_FORWARD)
- {
- for (e = bb->succ; e != 0; e = e->succ_next)
+ break;
+
+ case SR_BITMAP:
+ switch (op)
{
- if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
- continue;
- if (!TEST_BIT (visited, e->dest->index))
- hybrid_search_bitmap (e->dest, in, out, gen, kill, dir,
- conf_op, transfun, visited, pending,
- data);
+ case DF_UNION:
+ bitmap_a_or_b (rslt, op1, op2);
+ break;
+
+ case DF_INTERSECTION:
+ bitmap_a_and_b (rslt, op1, op2);
+ break;
+
+ default:
+ gcc_unreachable ();
}
+ break;
+
+ default:
+ gcc_unreachable ();
}
- else
+}
+
+static void
+dataflow_set_copy (enum set_representation repr, void *dest, void *src)
+{
+ switch (repr)
{
- for (e = bb->pred; e != 0; e = e->pred_next)
- {
- if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
- continue;
- if (!TEST_BIT (visited, e->src->index))
- hybrid_search_bitmap (e->src, in, out, gen, kill, dir,
- conf_op, transfun, visited, pending,
- data);
- }
+ case SR_SBITMAP:
+ sbitmap_copy (dest, src);
+ break;
+
+ case SR_BITMAP:
+ bitmap_copy (dest, src);
+ break;
+
+ default:
+ gcc_unreachable ();
}
}
+/* Hybrid search algorithm from "Implementation Techniques for
+ Efficient Data-Flow Analysis of Large Programs". */
-/* Hybrid search for sbitmaps, rather than bitmaps. */
static void
-hybrid_search_sbitmap (basic_block block, sbitmap *in, sbitmap *out,
- sbitmap *gen, sbitmap *kill, enum df_flow_dir dir,
- enum df_confluence_op conf_op,
- transfer_function_sbitmap transfun, sbitmap visited,
- sbitmap pending, void *data)
+hybrid_search (basic_block bb, struct dataflow *dataflow,
+ sbitmap visited, sbitmap pending, sbitmap considered)
{
int changed;
- int i = block->index;
+ int i = bb->index;
edge e;
- basic_block bb = block;
-
- SET_BIT (visited, block->index);
- if (TEST_BIT (pending, block->index))
- {
- if (dir == DF_FORWARD)
- {
- /* Calculate <conf_op> of predecessor_outs. */
- sbitmap_zero (in[i]);
- for (e = bb->pred; e != 0; e = e->pred_next)
- {
- if (e->src == ENTRY_BLOCK_PTR)
- continue;
- switch (conf_op)
- {
- case DF_UNION:
- sbitmap_a_or_b (in[i], in[i], out[e->src->index]);
- break;
- case DF_INTERSECTION:
- sbitmap_a_and_b (in[i], in[i], out[e->src->index]);
- break;
- }
- }
- }
- else
- {
- /* Calculate <conf_op> of successor ins. */
- sbitmap_zero (out[i]);
- for (e = bb->succ; e != 0; e = e->succ_next)
- {
- if (e->dest == EXIT_BLOCK_PTR)
- continue;
- switch (conf_op)
- {
- case DF_UNION:
- sbitmap_a_or_b (out[i], out[i], in[e->dest->index]);
- break;
- case DF_INTERSECTION:
- sbitmap_a_and_b (out[i], out[i], in[e->dest->index]);
- break;
- }
- }
- }
- /* Common part. */
- (*transfun)(i, &changed, in[i], out[i], gen[i], kill[i], data);
- RESET_BIT (pending, i);
- if (changed)
- {
- if (dir == DF_FORWARD)
- {
- for (e = bb->succ; e != 0; e = e->succ_next)
- {
- if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
- continue;
- SET_BIT (pending, e->dest->index);
- }
- }
- else
- {
- for (e = bb->pred; e != 0; e = e->pred_next)
- {
- if (e->src == ENTRY_BLOCK_PTR || e->dest->index == i)
- continue;
- SET_BIT (pending, e->src->index);
- }
- }
- }
- }
- if (dir == DF_FORWARD)
- {
- for (e = bb->succ; e != 0; e = e->succ_next)
- {
- if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
- continue;
- if (!TEST_BIT (visited, e->dest->index))
- hybrid_search_sbitmap (e->dest, in, out, gen, kill, dir,
- conf_op, transfun, visited, pending,
- data);
- }
- }
+ edge_iterator ei;
+
+ SET_BIT (visited, bb->index);
+ gcc_assert (TEST_BIT (pending, bb->index));
+ RESET_BIT (pending, i);
+
+#define HS(E_ANTI, E_ANTI_BB, E_ANTI_START_BB, IN_SET, \
+ E, E_BB, E_START_BB, OUT_SET) \
+ do \
+ { \
+      /* Apply <conf_op> over the incoming (E_ANTI) edge sets.  */	\
+ bitmap_zero (IN_SET[i]); \
+ FOR_EACH_EDGE (e, ei, bb->E_ANTI) \
+ { \
+ if (e->E_ANTI_BB == E_ANTI_START_BB) \
+ continue; \
+ if (!TEST_BIT (considered, e->E_ANTI_BB->index)) \
+ continue; \
+ \
+ dataflow_set_a_op_b (dataflow->repr, dataflow->conf_op, \
+ IN_SET[i], IN_SET[i], \
+ OUT_SET[e->E_ANTI_BB->index]); \
+ } \
+ \
+ (*dataflow->transfun)(i, &changed, \
+ dataflow->in[i], dataflow->out[i], \
+ dataflow->gen[i], dataflow->kill[i], \
+ dataflow->data); \
+ \
+ if (!changed) \
+ break; \
+ \
+ FOR_EACH_EDGE (e, ei, bb->E) \
+ { \
+ if (e->E_BB == E_START_BB || e->E_BB->index == i) \
+ continue; \
+ \
+ if (!TEST_BIT (considered, e->E_BB->index)) \
+ continue; \
+ \
+ SET_BIT (pending, e->E_BB->index); \
+ } \
+ \
+ FOR_EACH_EDGE (e, ei, bb->E) \
+ { \
+ if (e->E_BB == E_START_BB || e->E_BB->index == i) \
+ continue; \
+ \
+ if (!TEST_BIT (considered, e->E_BB->index)) \
+ continue; \
+ \
+ if (!TEST_BIT (visited, e->E_BB->index)) \
+ hybrid_search (e->E_BB, dataflow, visited, pending, considered); \
+ } \
+ } while (0)
+
+ if (dataflow->dir == DF_FORWARD)
+ HS (preds, src, ENTRY_BLOCK_PTR, dataflow->in,
+ succs, dest, EXIT_BLOCK_PTR, dataflow->out);
else
- {
- for (e = bb->pred; e != 0; e = e->pred_next)
- {
- if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
- continue;
- if (!TEST_BIT (visited, e->src->index))
- hybrid_search_sbitmap (e->src, in, out, gen, kill, dir,
- conf_op, transfun, visited, pending,
- data);
- }
- }
+ HS (succs, dest, EXIT_BLOCK_PTR, dataflow->out,
+ preds, src, ENTRY_BLOCK_PTR, dataflow->in);
}
-
-/* gen = GEN set.
- kill = KILL set.
- in, out = Filled in by function.
- blocks = Blocks to analyze.
- dir = Dataflow direction.
- conf_op = Confluence operation.
- transfun = Transfer function.
- order = Order to iterate in. (Should map block numbers -> order)
- data = Whatever you want. It's passed to the transfer function.
-
- This function will perform iterative bitvector dataflow, producing
- the in and out sets. Even if you only want to perform it for a
- small number of blocks, the vectors for in and out must be large
- enough for *all* blocks, because changing one block might affect
- others. However, it'll only put what you say to analyze on the
- initial worklist.
+/* This function will perform iterative bitvector dataflow described by
+ DATAFLOW, producing the in and out sets. Only the part of the cfg
+ induced by blocks in DATAFLOW->order is taken into account.
For forward problems, you probably want to pass in a mapping of
- block number to rc_order (like df->inverse_rc_map).
-*/
+ block number to rc_order (like df->inverse_rc_map). */
+
void
-iterative_dataflow_sbitmap (sbitmap *in, sbitmap *out, sbitmap *gen,
- sbitmap *kill, bitmap blocks,
- enum df_flow_dir dir,
- enum df_confluence_op conf_op,
- transfer_function_sbitmap transfun, int *order,
- void *data)
+iterative_dataflow (struct dataflow *dataflow)
{
- int i;
- fibheap_t worklist;
- basic_block bb;
- sbitmap visited, pending;
+ unsigned i, idx;
+ sbitmap visited, pending, considered;
pending = sbitmap_alloc (last_basic_block);
visited = sbitmap_alloc (last_basic_block);
+ considered = sbitmap_alloc (last_basic_block);
sbitmap_zero (pending);
sbitmap_zero (visited);
- worklist = fibheap_new ();
-
- EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
- {
- fibheap_insert (worklist, order[i], (void *) (size_t) i);
- SET_BIT (pending, i);
- if (dir == DF_FORWARD)
- sbitmap_copy (out[i], gen[i]);
- else
- sbitmap_copy (in[i], gen[i]);
- });
+ sbitmap_zero (considered);
- while (sbitmap_first_set_bit (pending) != -1)
+ for (i = 0; i < dataflow->n_blocks; i++)
{
- while (!fibheap_empty (worklist))
- {
- i = (size_t) fibheap_extract_min (worklist);
- bb = BASIC_BLOCK (i);
- if (!TEST_BIT (visited, bb->index))
- hybrid_search_sbitmap (bb, in, out, gen, kill, dir,
- conf_op, transfun, visited, pending, data);
- }
-
- if (sbitmap_first_set_bit (pending) != -1)
- {
- EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
- {
- fibheap_insert (worklist, order[i], (void *) (size_t) i);
- });
- sbitmap_zero (visited);
- }
+ idx = dataflow->order[i];
+ SET_BIT (pending, idx);
+ SET_BIT (considered, idx);
+ if (dataflow->dir == DF_FORWARD)
+ dataflow_set_copy (dataflow->repr,
+ dataflow->out[idx], dataflow->gen[idx]);
else
- {
- break;
- }
- }
+ dataflow_set_copy (dataflow->repr,
+ dataflow->in[idx], dataflow->gen[idx]);
+ };
- sbitmap_free (pending);
- sbitmap_free (visited);
- fibheap_delete (worklist);
-}
-
-
-/* Exactly the same as iterative_dataflow_sbitmap, except it works on
- bitmaps instead. */
-void
-iterative_dataflow_bitmap (bitmap *in, bitmap *out, bitmap *gen, bitmap *kill,
- bitmap blocks, enum df_flow_dir dir,
- enum df_confluence_op conf_op,
- transfer_function_bitmap transfun, int *order,
- void *data)
-{
- int i;
- fibheap_t worklist;
- basic_block bb;
- sbitmap visited, pending;
-
- pending = sbitmap_alloc (last_basic_block);
- visited = sbitmap_alloc (last_basic_block);
- sbitmap_zero (pending);
- sbitmap_zero (visited);
- worklist = fibheap_new ();
-
- EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
- {
- fibheap_insert (worklist, order[i], (void *) (size_t) i);
- SET_BIT (pending, i);
- if (dir == DF_FORWARD)
- bitmap_copy (out[i], gen[i]);
- else
- bitmap_copy (in[i], gen[i]);
- });
-
- while (sbitmap_first_set_bit (pending) != -1)
+ while (1)
{
- while (!fibheap_empty (worklist))
+ for (i = 0; i < dataflow->n_blocks; i++)
{
- i = (size_t) fibheap_extract_min (worklist);
- bb = BASIC_BLOCK (i);
- if (!TEST_BIT (visited, bb->index))
- hybrid_search_bitmap (bb, in, out, gen, kill, dir,
- conf_op, transfun, visited, pending, data);
- }
+ idx = dataflow->order[i];
- if (sbitmap_first_set_bit (pending) != -1)
- {
- EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i,
- {
- fibheap_insert (worklist, order[i], (void *) (size_t) i);
- });
- sbitmap_zero (visited);
- }
- else
- {
- break;
+ if (TEST_BIT (pending, idx) && !TEST_BIT (visited, idx))
+ hybrid_search (BASIC_BLOCK (idx), dataflow,
+ visited, pending, considered);
}
+
+ if (sbitmap_first_set_bit (pending) == -1)
+ break;
+
+ sbitmap_zero (visited);
}
+
sbitmap_free (pending);
sbitmap_free (visited);
- fibheap_delete (worklist);
+ sbitmap_free (considered);
}
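For reference, a minimal sketch of a transfer function matching the new generic signature; the solver invokes it as (*transfun) (bb_index, &changed, in, out, gen, kill, data).  This is not from the patch: it assumes SR_SBITMAP sets, the standard sbitmap primitives, and a caller-provided scratch set passed through DATA.

  static void
  example_transfun (int bb ATTRIBUTE_UNUSED, int *changed,
                    void *in, void *out, void *gen,
                    void *kill ATTRIBUTE_UNUSED, void *data)
  {
    sbitmap old = (sbitmap) data;	/* scratch copy of the old OUT set */

    sbitmap_copy (old, (sbitmap) out);
    /* A toy forward problem: out = in | gen (KILL ignored here).  */
    sbitmap_a_or_b ((sbitmap) out, (sbitmap) in, (sbitmap) gen);
    *changed = !sbitmap_equal ((sbitmap) out, old);
  }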
diff --git a/gcc/df.h b/gcc/df.h
index 0c89248e995..25a009bed9e 100644
--- a/gcc/df.h
+++ b/gcc/df.h
@@ -1,6 +1,6 @@
/* Form lists of pseudo register references for autoinc optimization
for GNU compiler. This is part of flow optimization.
- Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
+ Copyright (C) 1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz)
This file is part of GCC.
@@ -20,18 +20,24 @@ along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA. */
-#define DF_RD 1 /* Reaching definitions. */
-#define DF_RU 2 /* Reaching uses. */
-#define DF_LR 4 /* Live registers. */
-#define DF_DU_CHAIN 8 /* Def-use chain. */
-#define DF_UD_CHAIN 16 /* Use-def chain. */
+#ifndef GCC_DF_H
+#define GCC_DF_H
+
+#include "bitmap.h"
+#include "basic-block.h"
+
+#define DF_RD 1 /* Reaching definitions. */
+#define DF_RU 2 /* Reaching uses. */
+#define DF_LR 4 /* Live registers. */
+#define DF_DU_CHAIN 8 /* Def-use chain. */
+#define DF_UD_CHAIN 16 /* Use-def chain. */
#define DF_REG_INFO 32 /* Register info. */
#define DF_RD_CHAIN 64 /* Reg-def chain. */
-#define DF_RU_CHAIN 128 /* Reg-use chain. */
-#define DF_ALL 255
-#define DF_HARD_REGS 1024 /* Mark hard registers. */
-#define DF_EQUIV_NOTES 2048 /* Mark uses present in EQUIV/EQUAL notes. */
-#define DF_FOR_REGALLOC 4096 /* If called for the register allocator. */
+#define DF_RU_CHAIN 128 /* Reg-use chain. */
+#define DF_ALL 255
+#define DF_HARD_REGS 1024 /* Mark hard registers. */
+#define DF_EQUIV_NOTES 2048 /* Mark uses present in EQUIV/EQUAL notes. */
+#define DF_FOR_REGALLOC 4096 /* If called for the register allocator. */
enum df_ref_type {DF_REF_REG_DEF, DF_REF_REG_CLOBBER,
DF_REF_REG_USE, DF_REF_REG_MEM_LOAD,
@@ -56,7 +62,7 @@ enum df_ref_flags
/* This flag is set on register references inside a subreg on
machines which have CANNOT_CHANGE_MODE_CLASS.
Note, that this flag can also be set on df_refs representing
- the REG itself (i.e., one might not see the subreg anyore).
+ the REG itself (i.e., one might not see the subreg anymore).
Also note, that this flag is set also for hardreg refs, i.e.,
you must check yourself if it's a pseudo. */
DF_REF_MODE_CHANGE = 2,
@@ -90,6 +96,7 @@ struct ref
unsigned int id; /* Ref index. */
enum df_ref_type type; /* Type of ref. */
enum df_ref_flags flags; /* Various flags. */
+  void *data;			/* Data attached to the ref by the user.  */
};
@@ -163,9 +170,6 @@ struct df
bitmap insns_modified; /* Insns that (may) have changed. */
bitmap bbs_modified; /* Blocks that (may) have changed. */
bitmap all_blocks; /* All blocks in CFG. */
- /* The sbitmap vector of dominators or NULL if not computed.
- Ideally, this should be a pointer to a CFG object. */
- sbitmap *dom;
int *dfs_order; /* DFS order -> block number. */
int *rc_order; /* Reverse completion order -> block number. */
int *rts_order; /* Reverse top sort order -> block number. */
@@ -202,6 +206,7 @@ struct df_map
#define DF_REF_CHAIN(REF) ((REF)->chain)
#define DF_REF_ID(REF) ((REF)->id)
#define DF_REF_FLAGS(REF) ((REF)->flags)
+#define DF_REF_DATA(REF) ((REF)->data)
/* Macros to determine the reference type. */
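The new DATA field and DF_REF_DATA accessor give clients a place to hang per-ref information of their own; a minimal sketch (struct my_info is a placeholder type, not part of the patch):

  struct my_info *info = xcalloc (1, sizeof (struct my_info));
  DF_REF_DATA (ref) = info;

  /* ... later, retrieve it again ...  */
  struct my_info *p = DF_REF_DATA (ref);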
@@ -236,11 +241,12 @@ struct df_map
#define DF_INSN_USES(DF, INSN) ((DF)->insns[INSN_UID (INSN)].uses)
-/* Functions to build and analyse dataflow information. */
+/* Functions to build and analyze dataflow information. */
extern struct df *df_init (void);
-extern int df_analyse (struct df *, bitmap, int);
+extern int df_analyze (struct df *, bitmap, int);
+extern void df_analyze_subcfg (struct df *, bitmap, int);
extern void df_finish (struct df *);
@@ -298,7 +304,15 @@ extern int df_bb_regs_lives_compare (struct df *, basic_block, rtx, rtx);
extern rtx df_bb_single_def_use_insn_find (struct df *, basic_block, rtx,
rtx);
+extern struct ref *df_bb_regno_last_use_find (struct df *, basic_block, unsigned int);
+
+extern struct ref *df_bb_regno_first_def_find (struct df *, basic_block, unsigned int);
+extern struct ref *df_bb_regno_last_def_find (struct df *, basic_block, unsigned int);
+
+extern struct ref *df_find_def (struct df *, rtx, rtx);
+
+extern int df_reg_used (struct df *, rtx, rtx);
/* Functions for debugging from GDB. */
@@ -336,25 +350,43 @@ enum df_flow_dir
DF_BACKWARD
};
-typedef void (*transfer_function_sbitmap) (int, int *, sbitmap, sbitmap,
- sbitmap, sbitmap, void *);
-typedef void (*transfer_function_bitmap) (int, int *, bitmap, bitmap,
- bitmap, bitmap, void *);
+typedef void (*transfer_function) (int, int *, void *, void *,
+ void *, void *, void *);
+
+/* The description of a dataflow problem to solve. */
-extern void iterative_dataflow_sbitmap (sbitmap *, sbitmap *, sbitmap *,
- sbitmap *, bitmap, enum df_flow_dir,
- enum df_confluence_op,
- transfer_function_sbitmap,
- int *, void *);
+enum set_representation
+{
+  SR_SBITMAP,			/* Represent sets by sbitmaps.  */
+  SR_BITMAP			/* Represent sets by bitmaps.  */
+};
+
+struct dataflow
+{
+ enum set_representation repr; /* The way the sets are represented. */
+
+ /* The following arrays are indexed by block indices, so they must always
+ be large enough even if we restrict ourselves just to a subset of cfg. */
+ void **gen, **kill; /* Gen and kill sets. */
+ void **in, **out; /* Results. */
+
+ enum df_flow_dir dir; /* Dataflow direction. */
+ enum df_confluence_op conf_op; /* Confluence operator. */
+ unsigned n_blocks; /* Number of basic blocks in the
+ order. */
+ int *order; /* The list of basic blocks to work
+ with, in the order they should
+ be processed in. */
+ transfer_function transfun; /* The transfer function. */
+ void *data; /* Data used by the transfer
+ function. */
+};
-extern void iterative_dataflow_bitmap (bitmap *, bitmap *, bitmap *,
- bitmap *, bitmap,
- enum df_flow_dir,
- enum df_confluence_op,
- transfer_function_bitmap,
- int *, void *);
+extern void iterative_dataflow (struct dataflow *);
extern bool read_modify_subreg_p (rtx);
/* In cse.c */
extern void delete_trivially_dead_insns_df (rtx, int, struct df *);
+
+#endif /* GCC_DF_H */
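A hedged sketch of driving the new interface from client code, assuming an sbitmap problem with union confluence.  example_transfun, N_BITS and SCRATCH are placeholders (see the transfer-function sketch under df.c above), and a real forward problem would list the blocks in rc_order as the comment in df.c suggests:

  static void
  solve_toy_problem (unsigned int n_bits, sbitmap scratch)
  {
    struct dataflow dflow;
    basic_block bb;

    dflow.repr = SR_SBITMAP;
    dflow.dir = DF_FORWARD;
    dflow.conf_op = DF_UNION;
    dflow.gen = (void **) sbitmap_vector_alloc (last_basic_block, n_bits);
    dflow.kill = (void **) sbitmap_vector_alloc (last_basic_block, n_bits);
    dflow.in = (void **) sbitmap_vector_alloc (last_basic_block, n_bits);
    dflow.out = (void **) sbitmap_vector_alloc (last_basic_block, n_bits);
    dflow.order = xmalloc (last_basic_block * sizeof (int));
    dflow.n_blocks = 0;
    FOR_EACH_BB (bb)
      dflow.order[dflow.n_blocks++] = bb->index;

    /* ... fill gen[]/kill[] per block and zero in[]/out[] here ...  */

    dflow.transfun = example_transfun;	/* sketch given under df.c */
    dflow.data = scratch;		/* scratch set for the callback */
    iterative_dataflow (&dflow);
  }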
diff --git a/gcc/pre-reload.c b/gcc/pre-reload.c
index fb4cc49cd76..8e539742576 100644
--- a/gcc/pre-reload.c
+++ b/gcc/pre-reload.c
@@ -110,10 +110,7 @@ static int indirect_levels = -1;
/* Create a link in a def-use or use-def chain. */
static inline struct ra_link *
-ra_link_create (ra_info, ref, next)
- struct ra_info *ra_info;
- ra_ref *ref;
- struct ra_link *next;
+ra_link_create (struct ra_info *ra_info, ra_ref *ref, struct ra_link *next)
{
struct ra_link *link;
@@ -132,12 +129,8 @@ ra_link_create (ra_info, ref, next)
Returns first insn emitted. */
static rtx
-gen_pre_reload (out, in, opnum, type, class)
- rtx out;
- rtx in;
- int opnum;
- enum reload_type type;
- enum reg_class class;
+gen_pre_reload (rtx out, rtx in, int opnum, enum reload_type type,
+ enum reg_class class)
{
rtx last = get_last_insn ();
@@ -343,10 +336,7 @@ gen_pre_reload (out, in, opnum, type, class)
Return the instruction that stores into RELOADREG. */
static rtx
-inc_for_pre_reload (reloadreg, in, value, inc_amount)
- rtx reloadreg;
- rtx in, value;
- int inc_amount;
+inc_for_pre_reload (rtx reloadreg, rtx in, rtx value, int inc_amount)
{
rtx last;
rtx inc;
@@ -432,11 +422,8 @@ inc_for_pre_reload (reloadreg, in, value, inc_amount)
has the number J. OLD contains the value to be used as input. */
static void
-emit_input_pre_reload_insns (insn, rl, old, j)
- rtx insn;
- struct reload *rl;
- rtx old;
- int j ATTRIBUTE_UNUSED;
+emit_input_pre_reload_insns (rtx insn, struct reload *rl, rtx old,
+ int j ATTRIBUTE_UNUSED)
{
rtx reloadreg = rl->reg_rtx;
rtx oldequiv = 0;
@@ -599,10 +586,7 @@ emit_input_pre_reload_insns (insn, rl, old, j)
??? At some point we need to support handling output reloads of
JUMP_INSNs or insns that set cc0. */
static void
-do_output_pre_reload (insn, rl, j)
- rtx insn;
- struct reload *rl;
- int j;
+do_output_pre_reload (rtx insn, struct reload *rl, int j)
{
rtx old;
/* If this is an output reload that stores something that is
@@ -631,11 +615,8 @@ do_output_pre_reload (insn, rl, j)
If SETS is nonzero, also consider SETs. */
static int
-pseudo_clobbered_p (regno, insn, mode, sets)
- unsigned int regno;
- rtx insn;
- enum machine_mode mode ATTRIBUTE_UNUSED;
- int sets;
+pseudo_clobbered_p (unsigned int regno, rtx insn,
+ enum machine_mode mode ATTRIBUTE_UNUSED, int sets)
{
if ((GET_CODE (PATTERN (insn)) == CLOBBER
|| (sets && GET_CODE (PATTERN (insn)) == SET))
@@ -671,10 +652,8 @@ pseudo_clobbered_p (regno, insn, mode, sets)
/* Generate insns to for the output reload RL, which is for the insn described
by CHAIN and has the number J. */
static void
-emit_output_pre_reload_insns (insn, rl, j)
- rtx insn;
- struct reload *rl;
- int j ATTRIBUTE_UNUSED;
+emit_output_pre_reload_insns (rtx insn, struct reload *rl,
+ int j ATTRIBUTE_UNUSED)
{
rtx reloadreg = rl->reg_rtx;
int special = 0;
@@ -762,10 +741,7 @@ emit_output_pre_reload_insns (insn, rl, j)
/* Do input reloading for reload RL, which is for the insn described by CHAIN
and has the number J. */
static void
-do_input_pre_reload (insn, rl, j)
- rtx insn;
- struct reload *rl;
- int j;
+do_input_pre_reload (rtx insn, struct reload *rl, int j)
{
rtx old = (rl->in && GET_CODE (rl->in) == MEM
? rl->in_reg : rl->in);
@@ -782,8 +758,7 @@ do_input_pre_reload (insn, rl, j)
/* Output insns to reload values in and out of the chosen reload regs. */
void
-emit_pre_reload_insns (insn)
- rtx insn;
+emit_pre_reload_insns (rtx insn)
{
int j;
rtx following_insn = NEXT_INSN (insn);
@@ -827,11 +802,11 @@ emit_pre_reload_insns (insn)
other_operand_reload_insns = 0;
/* Dump reloads into the dump file. */
- if (rtl_dump_file)
+ if (dump_file)
{
- fprintf (rtl_dump_file, "\nReloads for insn # %d\n", INSN_UID (insn));
- debug_reload_to_stream (rtl_dump_file);
- fprintf (rtl_dump_file, "\n");
+ fprintf (dump_file, "\nReloads for insn # %d\n", INSN_UID (insn));
+ debug_reload_to_stream (dump_file);
+ fprintf (dump_file, "\n");
}
/* Now output the instructions to copy the data into and out of the
@@ -903,8 +878,7 @@ emit_pre_reload_insns (insn)
Return the rtx that X translates into; usually X, but modified. */
void
-subst_pre_reloads (insn)
- rtx insn ATTRIBUTE_UNUSED;
+subst_pre_reloads (rtx insn ATTRIBUTE_UNUSED)
{
int i;
@@ -994,11 +968,9 @@ subst_pre_reloads (insn)
call find_reloads_address on the location being returned. */
rtx
-get_pre_secondary_mem (x, mode, opnum, type)
- rtx x ATTRIBUTE_UNUSED;
- enum machine_mode mode;
- int opnum ATTRIBUTE_UNUSED;
- enum reload_type type ATTRIBUTE_UNUSED;
+get_pre_secondary_mem (rtx x ATTRIBUTE_UNUSED, enum machine_mode mode,
+ int opnum ATTRIBUTE_UNUSED,
+ enum reload_type type ATTRIBUTE_UNUSED)
{
rtx loc;
@@ -1053,15 +1025,11 @@ get_pre_secondary_mem (x, mode, opnum, type)
is safe from the earlyclobber). */
static rtx
-find_dummy_pre_reload (real_in, real_out, inloc, outloc,
- inmode, outmode, class, for_real, earlyclobber)
- rtx real_in, real_out;
- rtx *inloc ATTRIBUTE_UNUSED;
- rtx *outloc;
- enum machine_mode inmode, outmode;
- enum reg_class class;
- int for_real ATTRIBUTE_UNUSED;
- int earlyclobber ATTRIBUTE_UNUSED;
+find_dummy_pre_reload (rtx real_in, rtx real_out, rtx *inloc ATTRIBUTE_UNUSED,
+ rtx *outloc, enum machine_mode inmode,
+ enum machine_mode outmode, enum reg_class class,
+ int for_real ATTRIBUTE_UNUSED,
+ int earlyclobber ATTRIBUTE_UNUSED)
{
rtx in = real_in;
rtx out = real_out;
@@ -1167,16 +1135,9 @@ find_dummy_pre_reload (real_in, real_out, inloc, outloc,
distinguish them. */
static int
-push_pre_reload (in, out, inloc, outloc, class,
- inmode, outmode, strict_low, optional, opnum, type)
- rtx in, out;
- rtx *inloc, *outloc;
- enum reg_class class;
- enum machine_mode inmode, outmode;
- int strict_low;
- int optional;
- int opnum;
- enum reload_type type;
+push_pre_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, enum reg_class class,
+ enum machine_mode inmode, enum machine_mode outmode,
+ int strict_low, int optional, int opnum, enum reload_type type)
{
int i;
int dont_share = 0;
@@ -1621,8 +1582,7 @@ push_pre_reload (in, out, inloc, outloc, class,
so we set the SAFE field. */
static struct decomposition
-pre_reload_decompose (x)
- rtx x;
+pre_reload_decompose (rtx x)
{
struct decomposition val;
int all_const = 0;
@@ -1750,7 +1710,10 @@ pre_reload_decompose (x)
else if (CONSTANT_P (x)
/* This hasn't been assigned yet, so it can't conflict yet. */
|| GET_CODE (x) == SCRATCH)
- val.safe = 1;
+ {
+ val.start = val.end = 0;
+ val.safe = 1;
+ }
else
abort ();
return val;
@@ -1760,9 +1723,7 @@ pre_reload_decompose (x)
Y is also described by YDATA, which should be decompose (Y). */
static int
-pre_reload_immune_p (x, y, ydata)
- rtx x, y;
- struct decomposition ydata;
+pre_reload_immune_p (rtx x, rtx y, struct decomposition ydata)
{
struct decomposition xdata;
@@ -1825,15 +1786,9 @@ pre_reload_immune_p (x, y, ydata)
result of find_reloads_address. */
static rtx
-find_pre_reloads_toplev (x, opnum, type, ind_levels, is_set_dest, insn,
- address_reloaded)
- rtx x;
- int opnum;
- enum reload_type type;
- int ind_levels;
- int is_set_dest;
- rtx insn;
- int *address_reloaded;
+find_pre_reloads_toplev (rtx x, int opnum, enum reload_type type,
+ int ind_levels, int is_set_dest, rtx insn,
+ int *address_reloaded)
{
RTX_CODE code = GET_CODE (x);
@@ -1916,10 +1871,8 @@ find_pre_reloads_toplev (x, opnum, type, ind_levels, is_set_dest, insn,
If REG will occupies multiple hard regs, all of them must be in CLASS. */
static int
-pseudo_fits_class_p (operand, class, mode)
- rtx operand ATTRIBUTE_UNUSED;
- enum reg_class class;
- enum machine_mode mode;
+pseudo_fits_class_p (rtx operand ATTRIBUTE_UNUSED, enum reg_class class,
+ enum machine_mode mode)
{
static int max_consecutive[FIRST_PSEUDO_REGISTER];
int i;
@@ -1963,9 +1916,7 @@ static ra_ref * scan_addr_create_ref PARAMS ((struct ra_info *, rtx *,
FIXME: Must be substituted by define_address. */
static int
-scan_addr_func (loc, scan_state)
- rtx *loc;
- struct scan_addr_state *scan_state;
+scan_addr_func (rtx *loc, struct scan_addr_state *scan_state)
{
switch (GET_CODE (*loc))
{
@@ -2053,11 +2004,9 @@ scan_addr_func (loc, scan_state)
/* Helper for scan_addr_func. Create, fill and return re_ref structure. */
static ra_ref *
-scan_addr_create_ref (ra_info, loc, scan_state, ref_type)
- struct ra_info *ra_info;
- rtx *loc;
- struct scan_addr_state *scan_state;
- enum ra_ref_type ref_type;
+scan_addr_create_ref (struct ra_info *ra_info, rtx *loc,
+ struct scan_addr_state *scan_state,
+ enum ra_ref_type ref_type)
{
ra_ref *ref = (ra_ref *)obstack_alloc (&ra_info->obstack, sizeof (ra_ref));
ref->type = ref_type;
@@ -2088,8 +2037,7 @@ scan_addr_create_ref (ra_info, loc, scan_state, ref_type)
}
static int
-pre_operands_match_p (x, y)
- rtx x, y;
+pre_operands_match_p (rtx x, rtx y)
{
rtx s = (GET_CODE (x) == SUBREG) ? x : y;
rtx t = (s == x) ? y : x;
@@ -2125,8 +2073,7 @@ pre_operands_match_p (x, y)
swapping) are a subset of the conflicts of the other one. */
static int
-prefer_swapped (insn, op0, op1)
- rtx insn, op0, op1;
+prefer_swapped (rtx insn, rtx op0, rtx op1)
{
basic_block bb = BLOCK_FOR_INSN (insn);
if (GET_CODE (op0) == SUBREG)
@@ -2144,7 +2091,7 @@ prefer_swapped (insn, op0, op1)
if (find_reg_note (insn, REG_DEAD, op1))
return 1;
insn = NEXT_INSN (insn);
- if (insn == bb->end)
+ if (insn == BB_END (bb))
break;
}
return 0;
@@ -2177,17 +2124,13 @@ static int scan_alternative PARAMS ((struct alternative_info [], char *[],
/* Scan one alternative and fill alternative info. */
static int
-scan_alternative (this_alt, constraints, modified, address_reloaded,
- operands_match, swapped, commut, pfree, prej)
- struct alternative_info this_alt[];
- char *constraints[];
- enum reload_usage modified[];
- int address_reloaded[];
- char operands_match[MAX_RECOG_OPERANDS][MAX_RECOG_OPERANDS];
- int swapped;
- int *commut;
- int *pfree;
- int *prej;
+scan_alternative (struct alternative_info this_alt[],
+ char *constraints[],
+ enum reload_usage modified[],
+ int address_reloaded[],
+ char operands_match[MAX_RECOG_OPERANDS][MAX_RECOG_OPERANDS],
+ int swapped,
+ int *commut, int *pfree, int *prej)
{
int i;
int j;
@@ -2869,13 +2812,9 @@ scan_alternative (this_alt, constraints, modified, address_reloaded,
N_DEFS will be set to count of defs and N_USES to count of uses. */
static void
-collect_insn_info (ra_info, insn, def_refs, use_refs, n_defs, n_uses)
- struct ra_info *ra_info;
- rtx insn;
- ra_ref **def_refs;
- ra_ref **use_refs;
- int *n_defs;
- int *n_uses;
+collect_insn_info (struct ra_info *ra_info, rtx insn,
+ ra_ref **def_refs, ra_ref **use_refs,
+ int *n_defs, int *n_uses)
{
int insn_code_number;
int i, j;
@@ -3612,8 +3551,7 @@ collect_insn_info (ra_info, insn, def_refs, use_refs, n_defs, n_uses)
/* Recognize the insn INSN and check constraints validity very
strictly. */
int
-ra_check_constraints (insn)
- rtx insn;
+ra_check_constraints (rtx insn)
{
int insn_code_number;
int i;
@@ -3881,9 +3819,7 @@ ra_check_constraints (insn)
/* Increase the insn info table for handling SIZE elements. */
static void
-ra_insn_table_realloc (ra_info, size)
- struct ra_info *ra_info;
- int size;
+ra_insn_table_realloc (struct ra_info *ra_info, int size)
{
/* Make table 25 percent larger by default. */
if (! size)
@@ -3904,9 +3840,7 @@ ra_insn_table_realloc (ra_info, size)
/* Increase the reg info table for handling SIZE elements. */
static void
-ra_reg_table_realloc (ra_info, size)
- struct ra_info *ra_info;
- int size;
+ra_reg_table_realloc (struct ra_info *ra_info, int size)
{
/* Make table 25 percent larger by default. */
if (! size)
@@ -3926,8 +3860,7 @@ ra_reg_table_realloc (ra_info, size)
/* Allocate and initialise dataflow memory. */
struct ra_info *
-ra_info_init (n_regs)
- int n_regs;
+ra_info_init (int n_regs)
{
int n_insns;
struct ra_info *ra_info;
@@ -3962,8 +3895,7 @@ ra_info_init (n_regs)
/* Free all the dataflow info. */
void
-ra_info_free (ra_info)
- struct ra_info *ra_info;
+ra_info_free (struct ra_info *ra_info)
{
if (ra_info->insns)
free (ra_info->insns);
@@ -3984,9 +3916,7 @@ ra_info_free (ra_info)
/* Print all defs/uses for INSN from RA_INFO. */
static void
-debug_ra_insn_refs (ra_info, insn)
- struct ra_info *ra_info;
- rtx insn;
+debug_ra_insn_refs (struct ra_info *ra_info, rtx insn)
{
int i;
struct ra_link *link;
@@ -4102,10 +4032,7 @@ debug_ra_reg_refs (struct ra_info *ra_info, int regno)
ra_info->insns array can be reallocated. */
static void
-ra_info_add_insn_refs (ra_info, insn, refs)
- struct ra_info *ra_info;
- rtx insn;
- struct ra_refs *refs;
+ra_info_add_insn_refs (struct ra_info *ra_info, rtx insn, struct ra_refs *refs)
{
int uid = INSN_UID (insn);
@@ -4120,10 +4047,8 @@ ra_info_add_insn_refs (ra_info, insn, refs)
/* Add reference to ra_info->regs array for each register of INSN.
ra_info->regs array can be reallocated. */
static void
-ra_info_add_reg_refs (ra_info, insn, refs)
- struct ra_info *ra_info;
- rtx insn ATTRIBUTE_UNUSED;
- struct ra_refs *refs;
+ra_info_add_reg_refs (struct ra_info *ra_info, rtx insn ATTRIBUTE_UNUSED,
+ struct ra_refs *refs)
{
int regno;
struct ra_link *link;
@@ -4171,12 +4096,9 @@ ra_info_add_reg_refs (ra_info, insn, refs)
List builded from plain arrays DEF_REFS and USE_REFS. */
static struct ra_refs *
-build_ra_refs_for_insn (ra_info, def_refs, use_refs, n_defs, n_uses)
- struct ra_info *ra_info;
- ra_ref **def_refs;
- ra_ref **use_refs;
- int n_defs;
- int n_uses;
+build_ra_refs_for_insn (struct ra_info *ra_info,
+ ra_ref **def_refs, ra_ref **use_refs,
+ int n_defs, int n_uses)
{
int n;
struct ra_refs *ra_refs
@@ -4287,9 +4209,7 @@ df_link2ra_link (struct df2ra df2ra, rtx insn ATTRIBUTE_UNUSED,
}
struct df2ra
-build_df2ra (df, ra_info)
- struct df *df;
- struct ra_info *ra_info;
+build_df2ra (struct df *df, struct ra_info *ra_info)
{
struct df2ra df2ra;
rtx insn;
@@ -4322,9 +4242,7 @@ build_df2ra (df, ra_info)
}
void
-pre_reload (ra_info, modified)
- struct ra_info *ra_info;
- bitmap modified;
+pre_reload (struct ra_info *ra_info, bitmap modified)
{
int j;
int max;
@@ -4355,9 +4273,9 @@ pre_reload (ra_info, modified)
if (modified)
{
+ bitmap_iterator bi;
max = 0;
- EXECUTE_IF_SET_IN_BITMAP
- (modified, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (modified, 0, j, bi)
{
if (j > max)
max = j;
@@ -4366,7 +4284,7 @@ pre_reload (ra_info, modified)
ra_info_remove_refs (ra_info, ra_info->insns[j]);
ra_info->insns[j] = NULL;
}
- });
+ }
if (max >= ra_info->insn_size)
ra_insn_table_realloc (ra_info, max);
}
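The bitmap-walking loops converted in this and the following files all follow the same pattern: EXECUTE_IF_SET_IN_BITMAP now takes a bitmap_iterator and an ordinary statement body instead of the old callback-style body closed by "});".  A minimal sketch (some_bitmap and process () are placeholders):

  bitmap_iterator bi;
  unsigned int j;

  EXECUTE_IF_SET_IN_BITMAP (some_bitmap, 0, j, bi)
    {
      process (j);	/* plain statement body, no trailing "});"  */
    }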
@@ -4375,9 +4293,7 @@ pre_reload (ra_info, modified)
/* Collect all defs/uses for all insns. */
static void
-pre_reload_collect (ra_info, modified)
- struct ra_info *ra_info;
- bitmap modified;
+pre_reload_collect (struct ra_info *ra_info, bitmap modified)
{
rtx insn;
int cnt;
@@ -4390,8 +4306,8 @@ pre_reload_collect (ra_info, modified)
FOR_EACH_BB (bb)
{
- for (insn = bb->head;
- insn && PREV_INSN (insn) != bb->end;
+ for (insn = BB_HEAD (bb);
+ insn && PREV_INSN (insn) != BB_END (bb);
insn = NEXT_INSN (insn))
{
enum rtx_code pat_code;
@@ -4441,24 +4357,24 @@ pre_reload_collect (ra_info, modified)
if (ra_pass > 1)
abort ();
#endif
- if (rtl_dump_file)
+ if (dump_file)
{
- fprintf (rtl_dump_file, "Reload for insn:\n");
- print_rtl_single (rtl_dump_file, insn);
- fprintf (rtl_dump_file, "\n");
+ fprintf (dump_file, "Reload for insn:\n");
+ print_rtl_single (dump_file, insn);
+ fprintf (dump_file, "\n");
}
emit_pre_reload_insns (insn);
subst_pre_reloads (insn);
- if (rtl_dump_file)
+ if (dump_file)
{
- fprintf (rtl_dump_file, "Reload results:\n");
+ fprintf (dump_file, "Reload results:\n");
for (deb_insn = NEXT_INSN (before); deb_insn != after;
deb_insn = NEXT_INSN (deb_insn))
{
- print_rtl_single (rtl_dump_file, deb_insn);
- fprintf (rtl_dump_file, "\n");
+ print_rtl_single (dump_file, deb_insn);
+ fprintf (dump_file, "\n");
}
}
@@ -4483,19 +4399,19 @@ pre_reload_collect (ra_info, modified)
insn = NEXT_INSN (insn);
}
/* Keep basic block info up to date. */
- if (bb->head == orig_insn)
+ if (BB_HEAD (bb) == orig_insn)
{
if (prev)
- bb->head = NEXT_INSN (prev);
+ BB_HEAD (bb) = NEXT_INSN (prev);
else
- bb->head = get_insns ();
+ BB_HEAD (bb) = get_insns ();
}
- if (bb->end == orig_insn)
+ if (BB_END (bb) == orig_insn)
{
if (next)
- bb->end = PREV_INSN (next);
+ BB_END (bb) = PREV_INSN (next);
else
- bb->end = get_last_insn ();
+ BB_END (bb) = get_last_insn ();
}
}
}
@@ -4503,9 +4419,7 @@ pre_reload_collect (ra_info, modified)
/* Remove all references which referenced in REFS. */
static void
-ra_info_remove_refs (ra_info, refs)
- struct ra_info *ra_info;
- struct ra_refs *refs;
+ra_info_remove_refs (struct ra_info *ra_info, struct ra_refs *refs)
{
int regno;
struct ra_link *link;
@@ -4547,8 +4461,7 @@ ra_info_remove_refs (ra_info, refs)
/* Compare constraints based incrementally updated ra_info with ra_info
collected from scratch. */
void
-compare_ra_info (ra1)
- struct ra_info *ra1;
+compare_ra_info (struct ra_info *ra1)
{
struct ra_info *ra2;
rtx insn;
@@ -4671,11 +4584,7 @@ static int num_changes = 0;
Otherwise, perform the change and return 1. */
int
-ra_validate_change (object, loc, new, in_group)
- rtx object;
- rtx *loc;
- rtx new;
- int in_group;
+ra_validate_change (rtx object, rtx *loc, rtx new, int in_group)
{
rtx old = *loc;
@@ -4729,7 +4638,7 @@ ra_validate_change (object, loc, new, in_group)
Return 1 if all changes are valid, zero otherwise. */
int
-ra_apply_change_group ()
+ra_apply_change_group (void)
{
int i;
rtx last_validated = NULL_RTX;
@@ -4783,8 +4692,7 @@ ra_apply_change_group ()
/* Retract the changes numbered NUM and up. */
void
-ra_cancel_changes (num)
- int num;
+ra_cancel_changes (int num)
{
int i;
diff --git a/gcc/pre-reload.h b/gcc/pre-reload.h
index 0343cbb5a63..25d68840d45 100644
--- a/gcc/pre-reload.h
+++ b/gcc/pre-reload.h
@@ -22,8 +22,8 @@ Boston, MA 02111-1307, USA. */
/* Compare two RTX's. */
#define MATCHES(x, y) \
- (x == y || (x != 0 && (GET_CODE (x) == REG \
- ? GET_CODE (y) == REG && REGNO (x) == REGNO (y) \
+ (x == y || (x != 0 && (REG_P (x) \
+ ? REG_P (y) && REGNO (x) == REGNO (y) \
: rtx_equal_p (x, y) && ! side_effects_p (x))))
/* Indicates if two reloads purposes are for similar enough things that we
diff --git a/gcc/ra-build.c b/gcc/ra-build.c
index e9160b4db77..10517d7fa0c 100644
--- a/gcc/ra-build.c
+++ b/gcc/ra-build.c
@@ -256,8 +256,7 @@ copy_insn_p (rtx insn, rtx *source, rtx *target)
unsigned int d_regno, s_regno;
int uid = INSN_UID (insn);
- if (!INSN_P (insn))
- abort ();
+ gcc_assert (INSN_P (insn));
/* First look, if we already saw this insn. */
if (copy_cache[uid].seen != COPY_P_NOTSEEN)
@@ -287,13 +286,13 @@ copy_insn_p (rtx insn, rtx *source, rtx *target)
coalescing (the check for this is in remember_move() below). */
while (GET_CODE (d) == STRICT_LOW_PART)
d = XEXP (d, 0);
- if (GET_CODE (d) != REG
- && (GET_CODE (d) != SUBREG || GET_CODE (SUBREG_REG (d)) != REG))
+ if (!REG_P (d)
+ && (GET_CODE (d) != SUBREG || !REG_P (SUBREG_REG (d))))
return 0;
while (GET_CODE (s) == STRICT_LOW_PART)
s = XEXP (s, 0);
- if (GET_CODE (s) != REG
- && (GET_CODE (s) != SUBREG || GET_CODE (SUBREG_REG (s)) != REG))
+ if (!REG_P (s)
+ && (GET_CODE (s) != SUBREG || !REG_P (SUBREG_REG (s))))
return 0;
s_regno = (unsigned) REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
@@ -575,30 +574,31 @@ remember_move (rtx insn)
if (!TEST_BIT (move_handled, INSN_UID (insn)))
{
rtx s, d;
+ int ret;
+ struct df_link *slink = DF_INSN_USES (df, insn);
+ struct df_link *link = DF_INSN_DEFS (df, insn);
+
SET_BIT (move_handled, INSN_UID (insn));
- if (copy_insn_p (insn, &s, &d) == COPY_P_MOVE)
- {
- /* Some sanity test for the copy insn. */
- struct df_link *slink = DF_INSN_USES (df, insn);
- struct df_link *link = DF_INSN_DEFS (df, insn);
- if (!link || !link->ref || !slink || !slink->ref)
- abort ();
- /* The following (link->next != 0) happens when a hardreg
- is used in wider mode (REG:DI %eax). Then df.* creates
- a def/use for each hardreg contained therein. We only
- allow hardregs here. */
- if (link->next
- && DF_REF_REGNO (link->next->ref) >= FIRST_PSEUDO_REGISTER)
- abort ();
- }
- else
- abort ();
+ ret = copy_insn_p (insn, &s, &d);
+ gcc_assert (ret == COPY_P_MOVE);
+
+ /* Some sanity test for the copy insn. */
+ gcc_assert (link && link->ref);
+ gcc_assert (slink && slink->ref);
+ /* The following (link->next != 0) happens when a hardreg
+ is used in wider mode (REG:DI %eax). Then df.* creates
+ a def/use for each hardreg contained therein. We only
+ allow hardregs here. */
+ gcc_assert (!link->next
+ || DF_REF_REGNO (link->next->ref)
+ < FIRST_PSEUDO_REGISTER);
+
/* XXX for now we don't remember move insns involving any subregs.
Those would be difficult to coalesce (we would need to implement
handling of all the subwebs in the allocator, including that such
- subwebs could be source and target of coalesing). */
+ subwebs could be source and target of coalescing). */
if (GET_CODE (s) == GET_CODE (d)
- && (GET_CODE (s) == REG
+ && (REG_P (s)
|| (GET_CODE (s) == SUBREG
&& SUBREG_BYTE (s) == SUBREG_BYTE (d)
&& GET_MODE_SIZE (GET_MODE (SUBREG_REG (s)))
@@ -708,7 +708,7 @@ defuse_overlap_p_1 (rtx def, struct curr_use *use)
return (old_u != use->undefined) ? 4 : -1;
}
default:
- abort ();
+ gcc_unreachable ();
}
}
@@ -764,7 +764,7 @@ live_out_1 (struct df *df ATTRIBUTE_UNUSED, struct curr_use *use, rtx insn)
/* We want to access the root webpart. */
wp = find_web_part (wp);
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
if (wp->num_calls < 255)
wp->num_calls++;
@@ -891,8 +891,7 @@ live_out_1 (struct df *df ATTRIBUTE_UNUSED, struct curr_use *use, rtx insn)
{
/* If this insn doesn't completely define the USE, increment also
it's spanned deaths count (if this insn contains a death). */
- if (uid >= death_insns_max_uid)
- abort ();
+ gcc_assert (uid < death_insns_max_uid);
if (TEST_BIT (insns_with_deaths, uid))
wp->spanned_deaths++;
use->undefined = final_undef;
@@ -955,7 +954,7 @@ live_in_edge (struct df *df, struct curr_use *use, edge e)
use->live_over_abnormal = 1;
bitmap_set_bit (live_at_end[e->src->index], DF_REF_ID (use->wp->ref));
info_pred = (struct ra_bb_info *) e->src->aux;
- next_insn = e->src->end;
+ next_insn = BB_END (e->src);
/* If the last insn of the pred. block doesn't completely define the
current use, we need to check the block. */
@@ -970,7 +969,7 @@ live_in_edge (struct df *df, struct curr_use *use, edge e)
creation to later. */
bitmap_set_bit (info_pred->live_throughout,
DF_REF_ID (use->wp->ref));
- next_insn = e->src->head;
+ next_insn = BB_HEAD (e->src);
}
return next_insn;
}
@@ -996,6 +995,7 @@ live_in (struct df *df, struct curr_use *use, rtx insn)
are allowed. */
while (1)
{
+ unsigned int i;
int uid = INSN_UID (insn);
basic_block bb = BLOCK_FOR_INSN (insn);
number_seen[uid]++;
@@ -1012,7 +1012,7 @@ live_in (struct df *df, struct curr_use *use, rtx insn)
edge e;
unsigned HOST_WIDE_INT undef = use->undefined;
struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
- if ((e = bb->pred) == NULL)
+ if (EDGE_COUNT (bb->preds) == 0)
return;
/* We now check, if we already traversed the predecessors of this
block for the current pass and the current set of undefined
@@ -1024,13 +1024,15 @@ live_in (struct df *df, struct curr_use *use, rtx insn)
info->pass = loc_vpass;
info->undefined = undef;
/* All but the last predecessor are handled recursively. */
- for (; e->pred_next; e = e->pred_next)
+ for (e = NULL, i = 0; i < EDGE_COUNT (bb->preds) - 1; i++)
{
+ e = EDGE_PRED (bb, i);
insn = live_in_edge (df, use, e);
if (insn)
live_in (df, use, insn);
use->undefined = undef;
}
+ e = EDGE_PRED (bb, i);
insn = live_in_edge (df, use, e);
if (!insn)
return;
@@ -1145,7 +1147,7 @@ livethrough_conflicts_bb (basic_block bb)
/* First collect the IDs of all defs, count the number of death
containing insns, and if there's some call_insn here. */
all_defs = BITMAP_XMALLOC ();
- for (insn = bb->head; insn; insn = NEXT_INSN (insn))
+ for (insn = BB_HEAD (bb); insn; insn = NEXT_INSN (insn))
{
if (INSN_P (insn))
{
@@ -1156,12 +1158,12 @@ livethrough_conflicts_bb (basic_block bb)
bitmap_set_bit (all_defs, DF_REF_ID (info.defs[n]));
if (TEST_BIT (insns_with_deaths, INSN_UID (insn)))
deaths++;
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
contains_call++;
if (bitmap_bit_p (uid_memset, INSN_UID (insn)))
has_memset = 1;
}
- if (insn == bb->end)
+ if (insn == BB_END (bb))
break;
}
@@ -1169,22 +1171,25 @@ livethrough_conflicts_bb (basic_block bb)
uses conflict with all defs, and update their other members. */
if (deaths > 0 || contains_call || has_memset
|| bitmap_first_set_bit (all_defs) >= 0)
- EXECUTE_IF_SET_IN_BITMAP (info->live_throughout, first, use_id,
- {
- struct web_part *wp = &web_parts[df->def_id + use_id];
- unsigned int bl = rtx_to_bits (DF_REF_REG (wp->ref));
- bitmap conflicts;
- wp = find_web_part (wp);
- wp->spanned_deaths += deaths;
- if (contains_call + wp->num_calls > 255)
+ {
+ bitmap_iterator bi;
+ EXECUTE_IF_SET_IN_BITMAP (info->live_throughout, first, use_id, bi)
+ {
+ struct web_part *wp = &web_parts[df->def_id + use_id];
+ unsigned int bl = rtx_to_bits (DF_REF_REG (wp->ref));
+ bitmap conflicts;
+ wp = find_web_part (wp);
+ wp->spanned_deaths += deaths;
+ if (contains_call + wp->num_calls > 255)
wp->num_calls = 255;
- else
+ else
wp->num_calls += contains_call;
- wp->crosses_call |= !!contains_call;
- wp->crosses_memset |= !!has_memset;
- conflicts = get_sub_conflicts (wp, bl);
- bitmap_operation (conflicts, conflicts, all_defs, BITMAP_IOR);
- });
+ wp->crosses_call |= !!contains_call;
+ wp->crosses_memset |= !!has_memset;
+ conflicts = get_sub_conflicts (wp, bl);
+ bitmap_operation (conflicts, conflicts, all_defs, BITMAP_IOR);
+ }
+ }
BITMAP_XFREE (all_defs);
}
@@ -1323,8 +1328,7 @@ prune_hardregs_for_mode (HARD_REG_SET *s, enum machine_mode mode)
static void
init_one_web_common (struct web *web, rtx reg)
{
- if (GET_CODE (reg) != REG)
- abort ();
+ gcc_assert (REG_P (reg));
/* web->id isn't initialized here. */
web->regno = REGNO (reg);
web->orig_x = reg;
@@ -1400,10 +1404,8 @@ reinit_one_web (struct web *web, rtx reg)
web->stack_slot = NULL;
web->pattern = NULL;
web->alias = NULL;
- if (web->moves)
- abort ();
- if (!web->useless_conflicts)
- abort ();
+ gcc_assert (!web->moves);
+ gcc_assert (web->useless_conflicts);
}
/* Insert and returns a subweb corresponding to REG into WEB (which
@@ -1413,8 +1415,7 @@ static struct web *
add_subweb (struct web *web, rtx reg)
{
struct web *w;
- if (GET_CODE (reg) != SUBREG)
- abort ();
+ gcc_assert (GET_CODE (reg) == SUBREG);
w = xmalloc (sizeof (struct web));
/* Copy most content from parent-web. */
*w = *web;
@@ -1451,8 +1452,7 @@ add_subweb_2 (struct web *web, unsigned int size_word)
mode = mode_for_size (size, GET_MODE_CLASS (GET_MODE (ref_rtx)), 0);
if (mode == BLKmode)
mode = mode_for_size (size, MODE_INT, 0);
- if (mode == BLKmode)
- abort ();
+ gcc_assert (mode != BLKmode);
web = add_subweb (web, gen_rtx_SUBREG (mode, web->orig_x,
BYTE_BEGIN (size_word)));
web->artificial = 1;
@@ -1471,15 +1471,14 @@ init_web_parts (struct df *df)
{
if (df->defs[no])
{
- if (no < last_def_id && web_parts[no].ref != df->defs[no])
- abort ();
+ gcc_assert (no >= last_def_id || web_parts[no].ref == df->defs[no]);
web_parts[no].ref = df->defs[no];
/* Uplink might be set from the last iteration. */
if (!web_parts[no].uplink)
num_webs++;
}
else
- /* The last iteration might have left .ref set, while df_analyse()
+ /* The last iteration might have left .ref set, while df_analyze()
removed that ref (due to a removed copy insn) from the df->defs[]
array. As we don't check for that in realloc_web_parts()
we do that here. */
@@ -1489,9 +1488,8 @@ init_web_parts (struct df *df)
{
if (df->uses[no])
{
- if (no < last_use_id
- && web_parts[no + df->def_id].ref != df->uses[no])
- abort ();
+ gcc_assert (no >= last_use_id
+ || web_parts[no + df->def_id].ref == df->uses[no]);
web_parts[no + df->def_id].ref = df->uses[no];
if (!web_parts[no + df->def_id].uplink)
num_webs++;
@@ -1539,8 +1537,8 @@ static void
copy_conflict_list (struct web *web)
{
struct conflict_link *cl;
- if (web->orig_conflict_list || web->have_orig_conflicts)
- abort ();
+ gcc_assert (!web->orig_conflict_list);
+ gcc_assert (!web->have_orig_conflicts);
web->have_orig_conflicts = 1;
for (cl = web->conflict_list; cl; cl = cl->next)
{
@@ -1655,8 +1653,7 @@ record_conflict (struct web *web1, struct web *web2)
/* Trivial non-conflict or already recorded conflict. */
if (web1 == web2 || TEST_BIT (igraph, index))
return;
- if (id1 == id2)
- abort ();
+ gcc_assert (id1 != id2);
/* As fixed_regs are no targets for allocation, conflicts with them
are pointless. */
if ((web1->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web1->regno])
@@ -1746,34 +1743,41 @@ compare_and_free_webs (struct web_link **link)
{
struct web *web1 = wl->web;
struct web *web2 = ID2WEB (web1->id);
- if (web1->regno != web2->regno
- || (web1->type != PRECOLORED
- && web1->num_calls < web2->num_calls)
- || web1->mode_changed != web2->mode_changed
- || !rtx_equal_p (web1->orig_x, web2->orig_x)
- || web1->type != web2->type
- /* Only compare num_defs/num_uses with non-hardreg webs.
- E.g. the number of uses of the framepointer changes due to
- inserting spill code. */
- || (web1->type != PRECOLORED
- && (web1->num_uses != web2->num_uses
- || web1->num_defs != web2->num_defs))
- /* Similarly, if the framepointer was unreferenced originally
- but we added spills, these fields may not match. */
- || (web1->type != PRECOLORED
- && web1->crosses_call != web2->crosses_call)
- || (web1->type != PRECOLORED
- && web1->live_over_abnormal != web2->live_over_abnormal))
- abort ();
+ gcc_assert (web1->regno == web2->regno);
+ gcc_assert (web1->type == PRECOLORED
+ || web1->num_calls >= web2->num_calls);
+ gcc_assert (web1->mode_changed == web2->mode_changed);
+ gcc_assert (rtx_equal_p (web1->orig_x, web2->orig_x));
+ gcc_assert (web1->type == web2->type);
+ /* Only compare num_defs/num_uses with non-hardreg webs.
+ E.g. the number of uses of the framepointer changes due to
+ inserting spill code. */
+ gcc_assert (web1->type == PRECOLORED
+ || (web1->num_uses == web2->num_uses
+ && web1->num_defs == web2->num_defs));
+ /* Similarly, if the framepointer was unreferenced originally
+ but we added spills, these fields may not match. */
+ gcc_assert (web1->type == PRECOLORED
+ || web1->crosses_call == web2->crosses_call);
+ gcc_assert (web1->type == PRECOLORED
+ || web1->live_over_abnormal == web2->live_over_abnormal);
if (web1->type != PRECOLORED)
{
unsigned int i;
+
+ /* Only compare num_defs/num_uses with non-hardreg webs.
+ E.g. the number of uses of the framepointer changes due to
+ inserting spill code. */
+ gcc_assert (web1->num_uses == web2->num_uses);
+ gcc_assert (web1->num_defs == web2->num_defs);
+ /* Similarly, if the framepointer was unreferenced originally
+ but we added spills, these fields may not match. */
+ gcc_assert (web1->crosses_call == web2->crosses_call);
+ gcc_assert (web1->live_over_abnormal == web2->live_over_abnormal);
for (i = 0; i < web1->num_defs; i++)
- if (web1->defs[i] != web2->defs[i])
- abort ();
+ gcc_assert (web1->defs[i] == web2->defs[i]);
for (i = 0; i < web1->num_uses; i++)
- if (web1->uses[i] != web2->uses[i])
- abort ();
+ gcc_assert (web1->uses[i] == web2->uses[i]);
}
if (web1->type == PRECOLORED)
{
@@ -1818,8 +1822,8 @@ init_webs_defs_uses (void)
web->uses[use_i++] = link->ref;
}
web->temp_refs = NULL;
- if (def_i != web->num_defs || use_i != web->num_uses)
- abort ();
+ gcc_assert (def_i == web->num_defs);
+ gcc_assert (use_i == web->num_uses);
}
}
@@ -1928,11 +1932,13 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs,
web->id = newid;
web->temp_refs = NULL;
webnum++;
- if (web->regno < FIRST_PSEUDO_REGISTER && !hardreg2web[web->regno])
- hardreg2web[web->regno] = web;
- else if (web->regno < FIRST_PSEUDO_REGISTER
- && hardreg2web[web->regno] != web)
- abort ();
+ if (web->regno < FIRST_PSEUDO_REGISTER)
+ {
+ if (!hardreg2web[web->regno])
+ hardreg2web[web->regno] = web;
+ else
+ gcc_assert (hardreg2web[web->regno] == web);
+ }
}
/* If this reference already had a web assigned, we are done.
@@ -1955,8 +1961,8 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs,
web->live_over_abnormal = 1;
/* And check, that it's not a newly allocated web. This would be
an inconsistency. */
- if (!web->old_web || web->type == PRECOLORED)
- abort ();
+ gcc_assert (web->old_web);
+ gcc_assert (web->type != PRECOLORED);
continue;
}
/* In case this was no web part root, we need to initialize WEB
@@ -1979,13 +1985,16 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs,
if (!subweb)
{
subweb = add_subweb (web, reg);
- if (web->old_web)
- abort ();
+ gcc_assert (!web->old_web);
}
}
else
subweb = web;
+      /* Check that if def2web[i] was NULL above, we are _not_
+	 an old web.  */
+ gcc_assert (!web->old_web || web->type == PRECOLORED);
+
/* It can happen that this is an old web, but still the current
ref hadn't that web noted in def2web[i]. I.e. strictly speaking the
set of references of that web had changed, which in one way
@@ -1996,7 +2005,7 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs,
them. This means, that even some unchanged references will
get deleted/recreated, which in turn means a "new" ref for that web.
But it also means one "deleted" ref for it, so we simply replace
- the refs inline. */
+ the refs inline. XXX: remove me */
if (web->old_web && web->type != PRECOLORED)
{
unsigned int count, r;
@@ -2048,14 +2057,9 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs,
{
struct web *compare = def2web[i];
if (i < last_def_id)
- {
- if (web->old_web && compare != subweb)
- abort ();
- }
- if (!web->old_web && compare)
- abort ();
- if (compare && compare != subweb)
- abort ();
+ gcc_assert (!web->old_web || compare == subweb);
+ gcc_assert (web->old_web || !compare);
+ gcc_assert (!compare || compare == subweb);
}
def2web[i] = subweb;
web->num_defs++;
@@ -2065,15 +2069,11 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs,
if (ra_pass > 1)
{
struct web *compare = use2web[ref_id];
- if (ref_id < last_use_id)
- {
- if (web->old_web && compare != subweb)
- abort ();
- }
- if (!web->old_web && compare)
- abort ();
- if (compare && compare != subweb)
- abort ();
+
+ gcc_assert (ref_id >= last_use_id
+ || !web->old_web || compare == subweb);
+ gcc_assert (web->old_web || !compare);
+ gcc_assert (!compare || compare == subweb);
}
use2web[ref_id] = subweb;
web->num_uses++;
@@ -2083,8 +2083,7 @@ parts_to_webs_1 (struct df *df, struct web_link **copy_webs,
}
/* We better now have exactly as many webs as we had web part roots. */
- if (webnum != num_webs)
- abort ();
+ gcc_assert (webnum == num_webs);
return webnum;
}
@@ -2132,8 +2131,7 @@ parts_to_webs (struct df *df)
struct web *web;
if (wp->uplink || !wp->ref)
{
- if (wp->sub_conflicts)
- abort ();
+ gcc_assert (!wp->sub_conflicts);
continue;
}
web = def2web[i];
@@ -2237,8 +2235,7 @@ reset_conflicts (void)
web->conflict_list = web->orig_conflict_list;
web->orig_conflict_list = NULL;
}
- if (web->orig_conflict_list)
- abort ();
+ gcc_assert (!web->orig_conflict_list);
/* New non-precolored webs, have no conflict list. */
if (web->type != PRECOLORED && !web->old_web)
@@ -2247,8 +2244,7 @@ reset_conflicts (void)
/* Useless conflicts will be rebuilt completely. But check
for cleanliness, as the web might have come from the
free list. */
- if (bitmap_first_set_bit (web->useless_conflicts) >= 0)
- abort ();
+ gcc_assert (bitmap_first_set_bit (web->useless_conflicts) < 0);
}
else
{
@@ -2303,8 +2299,7 @@ check_conflict_numbers (void)
for (cl = web->conflict_list; cl; cl = cl->next)
if (cl->t->type != SELECT && cl->t->type != COALESCED)
new_conf += 1 + cl->t->add_hardregs;
- if (web->type != PRECOLORED && new_conf != web->num_conflicts)
- abort ();
+ gcc_assert (web->type == PRECOLORED || new_conf == web->num_conflicts);
}
}
#endif
@@ -2326,9 +2321,7 @@ static void
conflicts_between_webs (struct df *df)
{
unsigned int i;
-#ifdef STACK_REGS
struct dlist *d;
-#endif
bitmap ignore_defs = BITMAP_XMALLOC ();
unsigned int have_ignored;
unsigned int *pass_cache = xcalloc (num_webs, sizeof (int));
@@ -2372,6 +2365,8 @@ conflicts_between_webs (struct df *df)
{
int j;
struct web *web1 = find_subweb_2 (supweb1, cl->size_word);
+ bitmap_iterator bi;
+
if (have_ignored)
bitmap_operation (cl->conflicts, cl->conflicts, ignore_defs,
BITMAP_AND_COMPL);
@@ -2386,8 +2381,7 @@ conflicts_between_webs (struct df *df)
pass++;
/* Note, that there are only defs in the conflicts bitset. */
- EXECUTE_IF_SET_IN_BITMAP (
- cl->conflicts, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (cl->conflicts, 0, j, bi)
{
struct web *web2 = def2web[j];
unsigned int id2 = web2->id;
@@ -2396,34 +2390,31 @@ conflicts_between_webs (struct df *df)
pass_cache[id2] = pass;
record_conflict (web1, web2);
}
- });
+ }
}
}
free (pass_cache);
BITMAP_XFREE (ignore_defs);
-/* for (d = WEBS(INITIAL); d; d = d->next)
+ for (d = WEBS(INITIAL); d; d = d->next)
{
struct web *web = DLIST_WEB (d);
int j;
- if (web->crosses_call)
+
+ /*if (web->crosses_call)
for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
if (TEST_HARD_REG_BIT (regs_invalidated_by_call, j))
- record_conflict (web, hardreg2web[j]);
- }*/
+ record_conflict (web, hardreg2web[j]);*/
+
#ifdef STACK_REGS
- /* Pseudos can't go in stack regs if they are live at the beginning of
- a block that is reached by an abnormal edge. */
- for (d = WEBS(INITIAL); d; d = d->next)
- {
- struct web *web = DLIST_WEB (d);
- int j;
+ /* Pseudos can't go in stack regs if they are live at the beginning of
+ a block that is reached by an abnormal edge. */
if (web->live_over_abnormal)
for (j = FIRST_STACK_REG; j <= LAST_STACK_REG; j++)
record_conflict (web, hardreg2web[j]);
- }
#endif
+ }
}
/* This adds conflicts from early-clobber operands and those to prevent
@@ -2671,7 +2662,7 @@ contains_pseudo (rtx x)
int i;
if (GET_CODE (x) == SUBREG)
x = SUBREG_REG (x);
- if (GET_CODE (x) == REG)
+ if (REG_P (x))
{
if (REGNO (x) >= FIRST_PSEUDO_REGISTER)
return 1;
@@ -2830,12 +2821,12 @@ detect_remat_webs (void)
we created them ourself. They might not have set their
unchanging flag set, but nevertheless they are stable across
the livetime in question. */
- || (GET_CODE (src) == MEM
+ || (MEM_P (src)
&& bitmap_bit_p (emitted_by_spill, INSN_UID (insn))
&& memref_is_stack_slot (src))
/* If the web isn't live over any mem clobber we can remat
any MEM rtl. */
- || (GET_CODE (src) == MEM && !web->crosses_memset
+ || (MEM_P (src) && !web->crosses_memset
/*&& address_is_stable*/ /* XXX */
&& !contains_pseudo (src)
&& web->num_uses <= 2)
@@ -2965,10 +2956,10 @@ detect_webs_set_in_cond_jump (void)
{
basic_block bb;
FOR_EACH_BB (bb)
- if (GET_CODE (bb->end) == JUMP_INSN)
+ if (JUMP_P (BB_END (bb)))
{
struct df_link *link;
- for (link = DF_INSN_DEFS (df, bb->end); link; link = link->next)
+ for (link = DF_INSN_DEFS (df, BB_END (bb)); link; link = link->next)
if (link->ref && DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER)
{
struct web *web = def2web[DF_REF_ID (link->ref)];
@@ -2983,7 +2974,7 @@ detect_webs_set_in_cond_jump (void)
num_freedom,
num_conflicts. */
static void
-select_regclass ()
+select_regclass (void)
{
struct dlist *d, *d_next;
char *insn_alternative;
@@ -3005,10 +2996,10 @@ select_regclass ()
depending on reg_class of each web. */
FOR_EACH_BB (bb)
{
- rtx end = bb->end;
+ rtx end = BB_END (bb);
rtx insn;
- for (insn = bb->head;
+ for (insn = BB_HEAD (bb);
insn && PREV_INSN (insn) != end;
insn = NEXT_INSN (insn))
{
@@ -3131,6 +3122,10 @@ select_regclass ()
COPY_HARD_REG_SET (s, call_fixed_reg_set);
AND_HARD_REG_SET (s, call_used_reg_set);
AND_COMPL_HARD_REG_SET (web->usable_regs, s);
+ /* Webs crossing calls can't have their value in registers,
+ if the current function has a non-local label. */
+ if (current_function_has_nonlocal_label)
+ CLEAR_HARD_REG_SET (web->usable_regs);
}
if (web->live_over_abnormal)
AND_COMPL_HARD_REG_SET (web->usable_regs, regs_invalidated_by_call);
@@ -3354,7 +3349,7 @@ handle_asm_insn (struct df *df, rtx insn)
for (i = 0; i < XVECLEN (pat, 0); i++)
{
rtx t = XVECEXP (pat, 0, i);
- if (GET_CODE (t) == CLOBBER && GET_CODE (XEXP (t, 0)) == REG
+ if (GET_CODE (t) == CLOBBER && REG_P (XEXP (t, 0))
&& REGNO (XEXP (t, 0)) < FIRST_PSEUDO_REGISTER)
SET_HARD_REG_BIT (clobbered, REGNO (XEXP (t, 0)));
}
@@ -3379,7 +3374,7 @@ handle_asm_insn (struct df *df, rtx insn)
|| GET_CODE (reg) == SIGN_EXTRACT
|| GET_CODE (reg) == STRICT_LOW_PART)
reg = XEXP (reg, 0);
- if (GET_CODE (reg) != REG || REGNO (reg) < FIRST_PSEUDO_REGISTER)
+ if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
continue;
/* Search the web corresponding to this operand. We depend on
@@ -3395,10 +3390,8 @@ handle_asm_insn (struct df *df, rtx insn)
link = link->next;
if (!link || !link->ref)
{
- if (in_output)
- in_output = 0;
- else
- abort ();
+ gcc_assert (in_output);
+ in_output = 0;
}
else
break;
@@ -3497,7 +3490,7 @@ handle_asm_insn (struct df *df, rtx insn)
record_conflict (web, hardreg2web[c]);
#endif
}
- if (rtl_dump_file)
+ if (dump_file)
{
int c;
ra_debug_msg (DUMP_ASM, " ASM constrain Web %d conflicts with:", web->id);
@@ -3678,11 +3671,9 @@ ra_build_free (void)
for (i = 0; i < num_webs; i++)
{
struct web *web = ID2WEB (i);
- if (!web)
- abort ();
- if (i >= num_webs - num_subwebs
- && (web->conflict_list || web->orig_conflict_list))
- abort ();
+ gcc_assert (web);
+ gcc_assert (i < num_webs - num_subwebs
+ || (!web->conflict_list && !web->orig_conflict_list));
web->moves = NULL;
}
/* All webs in the free list have no defs or uses anymore. */
@@ -3760,13 +3751,13 @@ ra_build_free_all (struct df *df)
}
static void
-detect_spanned_deaths (spanned_deaths)
- unsigned int *spanned_deaths;
+detect_spanned_deaths (unsigned int *spanned_deaths)
{
rtx insn, head_prev;
unsigned int j;
basic_block bb;
unsigned int i;
+ bitmap_iterator bi;
bitmap already ATTRIBUTE_UNUSED;
int debug = 0;
sbitmap live = sbitmap_alloc (num_webs);
@@ -3783,10 +3774,10 @@ detect_spanned_deaths (spanned_deaths)
FOR_ALL_BB (bb)
{
- if (!bb->end)
+ if (!BB_END (bb))
continue;
- insn = bb->end;
+ insn = BB_END (bb);
if (!INSN_P (insn))
insn = prev_real_insn (insn);
@@ -3794,7 +3785,7 @@ detect_spanned_deaths (spanned_deaths)
if (!insn || BLOCK_FOR_INSN (insn) != bb)
continue;
- head_prev = PREV_INSN (bb->head);
+ head_prev = PREV_INSN (BB_HEAD (bb));
sbitmap_zero (live);
if(debug)
@@ -3803,8 +3794,7 @@ detect_spanned_deaths (spanned_deaths)
bitmap_clear (already);
}
- EXECUTE_IF_SET_IN_BITMAP
- (live_at_end[bb->index], 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (live_at_end[bb->index], 0, j, bi)
{
struct web *web = use2web[j];
struct web *aweb = alias (find_web_for_subweb (web));
@@ -3815,7 +3805,7 @@ detect_spanned_deaths (spanned_deaths)
fprintf (stderr, " %d", aweb->id);
bitmap_set_bit (already, aweb->id);
}
- });
+ }
if (debug)
fprintf (stderr, "\n");
@@ -3893,8 +3883,7 @@ detect_spanned_deaths (spanned_deaths)
if (has_useless_defs
&& ! bitmap_bit_p (emitted_by_spill, INSN_UID (insn)))
{
- EXECUTE_IF_SET_IN_SBITMAP
- (live, 0, i,
+ EXECUTE_IF_SET_IN_SBITMAP (live, 0, i,
{
struct web *supweb;
supweb = find_web_for_subweb (id2web[i]);
@@ -3927,8 +3916,7 @@ detect_spanned_deaths (spanned_deaths)
fprintf (stderr, " Death web %d in insn: %d ++",
web->id, INSN_UID(insn));
- EXECUTE_IF_SET_IN_SBITMAP
- (live, 0, i,
+ EXECUTE_IF_SET_IN_SBITMAP (live, 0, i,
{
struct web *supweb;
supweb = find_web_for_subweb (id2web[i]);
@@ -4251,15 +4239,16 @@ web_class_spill (struct web *web, char *insn_alternative)
if (!reg_class_subset_p (web->regclass, class))
{
+#if 0
static const char *const reg_class_names[] = REG_CLASS_NAMES;
fprintf (stderr, "%s pass: %d Web %d class %s insn class %s\n",
- cfun->name, ra_pass,
+ current_function_name (), ra_pass,
web->id, reg_class_names[web->regclass],
reg_class_names[class]);
fprintf (stderr, "Spill out reg %d from insn:\n",
DF_REF_REGNO (dref));
debug_rtx (DF_REF_INSN (dref));
-
+#endif
web_class_spill_ref (web, dref);
spilled_web = 1;
if (!already_insn)
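Note on the select_regclass hunk earlier in this file: it narrows web->usable_regs with hard-reg-set operations. Call-crossing webs lose the call-clobbered registers, webs live over abnormal edges lose regs_invalidated_by_call, and, new in this patch, call-crossing webs lose every register when the current function has a non-local label. A rough standalone sketch of that set arithmetic, with uint64_t masks standing in for HARD_REG_SET and for the AND_COMPL/CLEAR macros; the masks and the helper below are illustrative only:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct web_regs
{
  uint64_t usable_regs;          /* stand-in for HARD_REG_SET usable_regs */
  bool crosses_call;
  bool live_over_abnormal;
};

/* Roughly: AND_COMPL_HARD_REG_SET (usable, s) is "usable &= ~s",
   CLEAR_HARD_REG_SET (usable) is "usable = 0".  */
static void
narrow_usable_regs (struct web_regs *web,
                    uint64_t call_clobbered,       /* regs a call may trash */
                    uint64_t invalidated_by_call,  /* regs invalid across calls */
                    bool has_nonlocal_label)
{
  if (web->crosses_call)
    {
      web->usable_regs &= ~call_clobbered;
      /* Webs crossing calls can't have their value in registers at all
         when the function has a non-local label (the new check).  */
      if (has_nonlocal_label)
        web->usable_regs = 0;
    }
  if (web->live_over_abnormal)
    web->usable_regs &= ~invalidated_by_call;
}

int main (void)
{
  struct web_regs w = { 0xffffULL, true, false };
  narrow_usable_regs (&w, 0x00ffULL, 0x0f0fULL, false);
  printf ("usable after call narrowing: 0x%llx\n",
          (unsigned long long) w.usable_regs);
  return 0;
}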
diff --git a/gcc/ra-colorize.c b/gcc/ra-colorize.c
index c38f5c729dd..735f6b95e35 100644
--- a/gcc/ra-colorize.c
+++ b/gcc/ra-colorize.c
@@ -1,5 +1,5 @@
/* Graph coloring register allocator
- Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Matz <matz@suse.de>
and Daniel Berlin <dan@cgsoftware.com>.
@@ -52,7 +52,7 @@ extern int find_splits PARAMS ((struct web *));
static void push_list (struct dlist *, struct dlist **);
static void push_list_end (struct dlist *, struct dlist **);
static void free_dlist (struct dlist **);
-static void put_web_at_end (struct web *, enum node_type);
+static void put_web_at_end (struct web *, enum ra_node_type);
static void put_move (struct move *, enum move_type);
static void build_worklists (struct df *);
static void enable_move (struct web *);
@@ -63,7 +63,7 @@ static void remove_move (struct web *, struct move *);
static void add_worklist (struct web *);
static int ok (struct web *, struct web *);
static int conservative (struct web *, struct web *);
-static inline unsigned int simplify_p (enum node_type);
+static inline unsigned int simplify_p (enum ra_node_type);
static void combine (struct web *, struct web *);
static void coalesce (void);
static void freeze_moves (struct web *);
@@ -97,6 +97,7 @@ static void add_web_pair_cost (struct web *, struct web *,
static int comp_web_pairs (const void *, const void *);
static int enough_free_p (struct web *, struct web *);
static void sort_and_combine_web_pairs (int);
+static int ok_class (struct web *, struct web *);
static void aggressive_coalesce (void);
static void extended_coalesce_2 (void);
static void check_uncoalesced_moves (void);
@@ -109,8 +110,8 @@ static struct dlist *mv_frozen, *mv_active;
static void
push_list (struct dlist *x, struct dlist **list)
{
- if (x->next || x->prev)
- abort ();
+ gcc_assert (!x->next);
+ gcc_assert (!x->prev);
x->next = *list;
if (*list)
(*list)->prev = x;
@@ -120,8 +121,8 @@ push_list (struct dlist *x, struct dlist **list)
static void
push_list_end (struct dlist *x, struct dlist **list)
{
- if (x->prev || x->next)
- abort ();
+ gcc_assert (!x->prev);
+ gcc_assert (!x->next);
if (!*list)
{
*list = x;
@@ -173,7 +174,7 @@ free_dlist (struct dlist **list)
Inline, because it's called with constant TYPE every time. */
inline void
-put_web (struct web *web, enum node_type type)
+put_web (struct web *web, enum ra_node_type type)
{
switch (type)
{
@@ -201,7 +202,7 @@ put_web (struct web *web, enum node_type type)
push_list (web->dlink, &WEBS(SIMPLIFY));
break;
default:
- abort ();
+ gcc_unreachable ();
}
web->type = type;
}
@@ -216,10 +217,13 @@ void
reset_lists (void)
{
struct dlist *d;
- unsigned int i;
- if (WEBS(SIMPLIFY) || WEBS(SIMPLIFY_SPILL) || WEBS(SIMPLIFY_FAT)
- || WEBS(FREEZE) || WEBS(SPILL) || WEBS(SELECT))
- abort ();
+
+ gcc_assert (!WEBS(SIMPLIFY));
+ gcc_assert (!WEBS(SIMPLIFY_SPILL));
+ gcc_assert (!WEBS(SIMPLIFY_FAT));
+ gcc_assert (!WEBS(FREEZE));
+ gcc_assert (!WEBS(SPILL));
+ gcc_assert (!WEBS(SELECT));
while ((d = pop_list (&WEBS(COALESCED))) != NULL)
{
@@ -248,13 +252,20 @@ reset_lists (void)
BITMAP_XFREE (web->useless_conflicts);
}
+#ifdef ENABLE_CHECKING
/* Sanity check, that we only have free, initial or precolored webs. */
- for (i = 0; i < num_webs; i++)
- {
- struct web *web = ID2WEB (i);
- if (web->type != INITIAL && web->type != FREE && web->type != PRECOLORED)
- abort ();
- }
+ {
+ unsigned int i;
+
+ for (i = 0; i < num_webs; i++)
+ {
+ struct web *web = ID2WEB (i);
+
+ gcc_assert (web->type == INITIAL || web->type == FREE
+ || web->type == PRECOLORED);
+ }
+ }
+#endif
free_dlist (&mv_worklist);
free_dlist (&mv_coalesced);
free_dlist (&mv_constrained);
@@ -266,12 +277,12 @@ reset_lists (void)
list. Additionally TYPE may not be SIMPLIFY. */
static void
-put_web_at_end (struct web *web, enum node_type type)
+put_web_at_end (struct web *web, enum ra_node_type type)
{
if (type == PRECOLORED)
type = INITIAL;
- else if (type == SIMPLIFY)
- abort ();
+ else
+ gcc_assert (type != SIMPLIFY);
push_list_end (web->dlink, &WEBS(type));
web->type = type;
}
@@ -311,7 +322,7 @@ put_move (struct move *move, enum move_type type)
push_list (move->dlink, &mv_active);
break;
default:
- abort ();
+ gcc_unreachable ();
}
move->type = type;
}
@@ -497,8 +508,7 @@ remove_move (struct web *web, struct move *move)
struct move_list *ml;
remove_move_1 (web, move);
for (ml = web->moves; ml; ml = ml->next)
- if (ml->move == move)
- abort ();
+ gcc_assert (ml->move != move);
}
/* Merge the moves for the two webs into the first web's movelist. */
@@ -559,7 +569,7 @@ ok (struct web *target, struct web *source)
return 0;
/* Sanity for funny modes. */
- size = HARD_REGNO_NREGS (color, GET_MODE (target->orig_x));
+ size = hard_regno_nregs[color][GET_MODE (target->orig_x)];
if (!size)
return 0;
@@ -615,7 +625,7 @@ ok (struct web *target, struct web *source)
{
/* The main webs do _not_ conflict, only some parts of both. This
means, that 4 is possibly true, so we need to check this too.
- For this we go thru all sub conflicts between T and C, and see if
+ For this we go through all sub conflicts between T and C, and see if
the target part of C already conflicts with S. When this is not
the case we disallow coalescing. */
struct sub_conflict *sl;
@@ -680,7 +690,7 @@ alias (struct web *web)
SIMPLIFY types. */
static inline unsigned int
-simplify_p (enum node_type type)
+simplify_p (enum ra_node_type type)
{
return type == SIMPLIFY || type == SIMPLIFY_SPILL || type == SIMPLIFY_FAT;
}
@@ -692,8 +702,8 @@ combine (struct web *u, struct web *v)
{
int i;
struct conflict_link *wl;
- if (u == v || v->type == COALESCED)
- abort ();
+ gcc_assert (u != v);
+ gcc_assert (v->type != COALESCED);
/* If not both U and V are stack pseudos make U the non-stack one. */
if (SPILL_SLOT_P (u->regno) && !SPILL_SLOT_P (v->regno))
{
@@ -701,8 +711,6 @@ combine (struct web *u, struct web *v)
u = v;
v = h;
}
- if (0 && SPILL_SLOT_P (u->regno) != SPILL_SLOT_P (v->regno))
- abort ();
remove_web_from_list (v);
put_web (v, COALESCED);
v->alias = u;
@@ -742,7 +750,7 @@ combine (struct web *u, struct web *v)
struct web *web = u;
int nregs = 1 + v->add_hardregs;
if (u->type == PRECOLORED)
- nregs = HARD_REGNO_NREGS (u->color, GET_MODE (v->orig_x));
+ nregs = hard_regno_nregs[u->color][GET_MODE (v->orig_x)];
/* For precolored U's we need to make conflicts between V's
neighbors and as many hardregs from U as V needed if it gets
@@ -798,10 +806,9 @@ combine (struct web *u, struct web *v)
conflicts. */
u->num_freedom = hard_regs_count (u->usable_regs);
u->num_freedom -= u->add_hardregs;
- /* The next would mean an invalid coalesced move (both webs have no
- possible hardreg in common), so abort. */
- if (!u->num_freedom)
- abort();
+ /* The next checks for an invalid coalesced move (both webs must have
+ possible hardregs in common). */
+ gcc_assert (u->num_freedom);
IOR_HARD_REG_SET (u->prefer_colors, v->prefer_colors);
@@ -849,7 +856,8 @@ coalesce (void)
}
else if (target->type == PRECOLORED
|| TEST_BIT (sup_igraph, source->id * num_webs + target->id)
- || TEST_BIT (sup_igraph, target->id * num_webs + source->id))
+ || TEST_BIT (sup_igraph, target->id * num_webs + source->id)
+ || !ok_class (target, source))
{
remove_move (source, m);
remove_move (target, m);
@@ -996,8 +1004,7 @@ select_spill (void)
if (!bestd)
bestd = bestd3, best = best3;
}
- if (!bestd)
- abort ();
+ gcc_assert (bestd);
/* Note the potential spill. */
DLIST_WEB (bestd)->was_spilled = 1;
@@ -1020,7 +1027,7 @@ color_usable_p (int c, HARD_REG_SET dont_begin_colors,
&& HARD_REGNO_MODE_OK (c, mode))
{
int i, size;
- size = HARD_REGNO_NREGS (c, mode);
+ size = hard_regno_nregs[c][mode];
for (i = 1; i < size && TEST_HARD_REG_BIT (free_colors, c + i); i++);
if (i == size)
return 1;
@@ -1057,7 +1064,7 @@ get_free_reg (HARD_REG_SET dont_begin_colors, HARD_REG_SET free_colors,
&& HARD_REGNO_MODE_OK (c, mode))
{
int i, size;
- size = HARD_REGNO_NREGS (c, mode);
+ size = hard_regno_nregs[c][mode];
for (i = 1; i < size && TEST_HARD_REG_BIT (free_colors, c + i); i++);
if (i != size)
{
@@ -1153,15 +1160,15 @@ static char *
hardregset_to_string (HARD_REG_SET s)
{
static char string[/*FIRST_PSEUDO_REGISTER + 30*/1024];
-#if FIRST_PSEUDO_REGISTER <= HOST_BITS_PER_WIDE_INT
- sprintf (string, HOST_WIDE_INT_PRINT_HEX, s);
+#if FIRST_PSEUDO_REGISTER <= HOST_BITS_PER_WIDEST_FAST_INT
+ sprintf (string, HOST_WIDE_INT_PRINT_HEX, (HOST_WIDE_INT) s);
#else
char *c = string;
int i,j;
c += sprintf (c, "{ ");
for (i = 0;i < HARD_REG_SET_LONGS; i++)
{
- for (j = 0; j < HOST_BITS_PER_WIDE_INT; j++)
+ for (j = 0; j < HOST_BITS_PER_WIDEST_FAST_INT; j++)
c += sprintf (c, "%s", ( 1 << j) & s[i] ? "1" : "0");
c += sprintf (c, "%s", i ? ", " : "");
}
@@ -1201,11 +1208,11 @@ calculate_dont_begin (struct web *web, HARD_REG_SET *result)
if (ptarget->type == COLORED || ptarget->type == PRECOLORED)
{
struct web *source = (sl) ? sl->s : web;
- unsigned int tsize = HARD_REGNO_NREGS (ptarget->color,
- GET_MODE (w->orig_x));
+ unsigned int tsize = hard_regno_nregs[ptarget->color]
+ [GET_MODE (w->orig_x)];
/* ssize is only a first guess for the size. */
- unsigned int ssize = HARD_REGNO_NREGS (ptarget->color, GET_MODE
- (source->orig_x));
+ unsigned int ssize = hard_regno_nregs[ptarget->color][GET_MODE
+ (source->orig_x)];
unsigned int tofs = 0;
unsigned int sofs = 0;
/* C1 and C2 can become negative, so unsigned
@@ -1231,11 +1238,11 @@ calculate_dont_begin (struct web *web, HARD_REG_SET *result)
c1 to a place, where the last of sources hardregs does not
overlap the first of targets colors. */
while (c1 + sofs
- + HARD_REGNO_NREGS (c1, GET_MODE (source->orig_x)) - 1
+ + hard_regno_nregs[c1][GET_MODE (source->orig_x)] - 1
< ptarget->color + tofs)
c1++;
while (c1 > 0 && c1 + sofs
- + HARD_REGNO_NREGS (c1, GET_MODE (source->orig_x)) - 1
+ + hard_regno_nregs[c1][GET_MODE (source->orig_x)] - 1
> ptarget->color + tofs)
c1--;
for (; c1 <= c2; c1++)
@@ -1243,7 +1250,7 @@ calculate_dont_begin (struct web *web, HARD_REG_SET *result)
}
}
/* The next if() only gets true, if there was no wl->sub at all, in
- which case we are only making one go thru this loop with W being
+ which case we are only making one go through this loop with W being
a whole web. */
if (!sl)
break;
@@ -1302,6 +1309,7 @@ colorize_one_web (struct web *web, int hard)
HARD_REG_SET bias;
CLEAR_HARD_REG_SET (fat_colors);
+
if (SPILL_SLOT_P (web->regno))
hard = 0;
@@ -1485,19 +1493,18 @@ colorize_one_web (struct web *web, int hard)
if (c < 0)
{
/* Guard against a simplified node being spilled. */
- /* Don't abort. This can happen, when e.g. enough registers
+ /* Don't assert. This can happen, when e.g. enough registers
are available in colors, but they are not consecutive. This is a
very serious issue if this web is a short live one, because
even if we spill this one here, the situation won't become better
in the next iteration. It probably will have the same conflicts,
those will have the same colors, and we would come here again, for
- all parts, in which this one gets splitted by the spill. This
+ all parts, in which this one gets split by the spill. This
can result in endless iteration spilling the same register again and
again. That's why we try to find a neighbor, which spans more
instructions that ourself, and got a color, and try to spill _that_.
- if (DLIST_WEB (d)->was_spilled < 0)
- abort (); */
+ gcc_assert (DLIST_WEB (d)->was_spilled >= 0); */
if (hard && (!web->was_spilled || web->spill_temp))
{
unsigned int loop;
@@ -1656,8 +1663,7 @@ colorize_one_web (struct web *web, int hard)
int old_c = try->color;
if (try->type == COALESCED)
{
- if (alias (try)->type != PRECOLORED)
- abort ();
+ gcc_assert (alias (try)->type == PRECOLORED);
ra_debug_msg (DUMP_COLORIZE, " breaking alias %d -> %d\n",
try->id, alias (try)->id);
break_precolored_alias (try);
@@ -1715,7 +1721,7 @@ colorize_one_web (struct web *web, int hard)
web->color = c;
if (flag_ra_biased)
{
- int nregs = HARD_REGNO_NREGS (c, GET_MODE (web->orig_x));
+ int nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)];
for (wl = web->conflict_list; wl; wl = wl->next)
{
struct web *ptarget = alias (wl->t);
@@ -1816,7 +1822,7 @@ try_recolor_web (struct web *web)
int i, nregs;
if (!HARD_REGNO_MODE_OK (c, GET_MODE (web->orig_x)))
continue;
- nregs = HARD_REGNO_NREGS (c, GET_MODE (web->orig_x));
+ nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)];
for (i = 0; i < nregs; i++)
if (!TEST_HARD_REG_BIT (web->usable_regs, c + i))
break;
@@ -1866,14 +1872,14 @@ try_recolor_web (struct web *web)
/* Note that min_color[] contains 1-based values (zero means
undef). */
c1 = c1 == 0 ? web2->color : (c1 - 1);
- c2 = web2->color + HARD_REGNO_NREGS (web2->color, GET_MODE
- (web2->orig_x)) - 1;
+ c2 = web2->color + hard_regno_nregs[web2->color][GET_MODE
+ (web2->orig_x)] - 1;
for (; c1 <= c2; c1++)
if (TEST_HARD_REG_BIT (possible_begin, c1))
{
int nregs;
HARD_REG_SET colors;
- nregs = HARD_REGNO_NREGS (c1, GET_MODE (web->orig_x));
+ nregs = hard_regno_nregs[c1][GET_MODE (web->orig_x)];
COPY_HARD_REG_SET (colors, web2->usable_regs);
for (; nregs--;)
CLEAR_HARD_REG_BIT (colors, c1 + nregs);
@@ -1899,7 +1905,7 @@ try_recolor_web (struct web *web)
newcol = c;
if (newcol >= 0 && cost_neighbors[newcol] < web->spill_cost)
{
- int nregs = HARD_REGNO_NREGS (newcol, GET_MODE (web->orig_x));
+ int nregs = hard_regno_nregs[newcol][GET_MODE (web->orig_x)];
unsigned HOST_WIDE_INT cost = 0;
int *old_colors;
struct conflict_link *wl_next;
@@ -1922,8 +1928,8 @@ try_recolor_web (struct web *web)
wl_next = wl->next;
if (web2->type == COLORED)
{
- int nregs2 = HARD_REGNO_NREGS (web2->color, GET_MODE
- (web2->orig_x));
+ int nregs2 = hard_regno_nregs[web2->color][GET_MODE
+ (web2->orig_x)];
if (web->color >= web2->color + nregs2
|| web2->color >= web->color + nregs)
continue;
@@ -1947,9 +1953,8 @@ try_recolor_web (struct web *web)
above what happens, when wide webs are involved, and why in that
case there might actually be some webs spilled although thought to
be colorable. */
- if (cost > cost_neighbors[newcol]
- && nregs == 1 && !TEST_HARD_REG_BIT (wide_seen, newcol))
- abort ();
+ gcc_assert (cost <= cost_neighbors[newcol]
+ || nregs != 1 || TEST_HARD_REG_BIT (wide_seen, newcol));
/* But if the new spill-cost is higher than our own, then really loose.
Respill us and recolor neighbors as before. */
if (cost > web->spill_cost)
@@ -1964,26 +1969,29 @@ try_recolor_web (struct web *web)
struct web *web2 = alias (wl->t);
if (old_colors[web2->id])
{
- if (web2->type == SPILLED)
+ switch (web2->type)
{
+ case SPILLED:
remove_list (web2->dlink, &WEBS(SPILLED));
web2->color = old_colors[web2->id] - 1;
put_web (web2, COLORED);
+ break;
+ case COLORED:
+ web2->color = old_colors[web2->id] - 1;
+ break;
+ case SELECT:
+ /* This means, that WEB2 once was a part of a coalesced
+ web, which got spilled in the above colorize_one_web()
+ call, and whose parts then got split and put back
+ onto the SELECT stack. As the cause for that splitting
+ (the coloring of WEB) was worthless, we should again
+ coalesce the parts, as they were before. For now we
+ simply leave them SELECTed, for our caller to take
+ care. */
+ break;
+ default:
+ gcc_unreachable ();
}
- else if (web2->type == COLORED)
- web2->color = old_colors[web2->id] - 1;
- else if (web2->type == SELECT)
- /* This means, that WEB2 once was a part of a coalesced
- web, which got spilled in the above colorize_one_web()
- call, and whose parts then got splitted and put back
- onto the SELECT stack. As the cause for that splitting
- (the coloring of WEB) was worthless, we should again
- coalesce the parts, as they were before. For now we
- simply leave them SELECTed, for our caller to take
- care. */
- ;
- else
- abort ();
}
}
}
@@ -2013,7 +2021,7 @@ insert_coalesced_conflicts (void)
int i;
int nregs = 1 + web->add_hardregs;
if (aweb->type == PRECOLORED)
- nregs = HARD_REGNO_NREGS (aweb->color, GET_MODE (web->orig_x));
+ nregs = hard_regno_nregs[aweb->color][GET_MODE (web->orig_x)];
for (i = 0; i < nregs; i++)
{
if (aweb->type == PRECOLORED)
@@ -2023,7 +2031,7 @@ insert_coalesced_conflicts (void)
when first some webs were coalesced and conflicts
propagated, then some combining narrowed usable_regs and
further coalescing ignored those conflicts. Now there are
- some edges to COALESCED webs but not to it's alias.
+ some edges to COALESCED webs but not to its alias.
So abort only when they really should conflict. */
if ((!(tweb->type == PRECOLORED
|| TEST_BIT (sup_igraph, tweb->id * num_webs + wl->t->id))
@@ -2038,9 +2046,9 @@ insert_coalesced_conflicts (void)
&& hard_regs_intersect_p (&tweb->usable_regs,
&wl->t->usable_regs))
{
- /*abort ();*/
+ /*gcc_assert (0);*/
fprintf (stderr, "SHIT. Lost a conflict in %s\n",
- cfun->name);
+ current_function_name ());
ra_debug_msg (DUMP_COLORIZE, "Lost conflict %d - %d\n",
tweb->id, wl->t->id);
if (wl->sub == NULL)
@@ -2125,19 +2133,35 @@ check_colors (void)
struct web *web = id2web[i];
struct web *aweb = alias (web);
struct conflict_link *wl;
- int nregs, c;
+ int nregs;
if (aweb->type == SPILLED || SPILL_SLOT_P (web->regno))
continue;
- else if (aweb->type == COLORED)
- nregs = HARD_REGNO_NREGS (aweb->color, GET_MODE (web->orig_x));
- else if (aweb->type == PRECOLORED)
- nregs = 1;
- else
- abort ();
- /* The color must be valid for the original usable_regs. */
- for (c = 0; c < nregs; c++)
- if (!TEST_HARD_REG_BIT (web->usable_regs, aweb->color + c))
- abort ();
+
+ switch (aweb->type)
+ {
+ case SPILLED:
+ continue;
+
+ case COLORED:
+ nregs = hard_regno_nregs[aweb->color][GET_MODE (web->orig_x)];
+ break;
+
+ case PRECOLORED:
+ nregs = 1;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+#ifdef ENABLE_CHECKING
+ /* The color must be valid for the original usable_regs. */
+ {
+ int c;
+ for (c = 0; c < nregs; c++)
+ gcc_assert (TEST_HARD_REG_BIT (web->usable_regs, aweb->color + c));
+ }
+#endif
/* Search the original (pre-coalesce) conflict list. In the current
one some imprecise conflicts may be noted (due to combine() or
insert_coalesced_conflicts() relocating partial conflicts) making
@@ -2153,7 +2177,7 @@ check_colors (void)
struct web *web2 = alias (wl->t);
int nregs2;
if (web2->type == COLORED)
- nregs2 = HARD_REGNO_NREGS (web2->color, GET_MODE (web2->orig_x));
+ nregs2 = hard_regno_nregs[web2->color][GET_MODE (web2->orig_x)];
else if (web2->type == PRECOLORED)
nregs2 = 1;
else
@@ -2176,8 +2200,8 @@ check_colors (void)
continue;
for (sl = wl->sub; sl; sl = sl->next)
{
- int ssize = HARD_REGNO_NREGS (scol, GET_MODE (sl->s->orig_x));
- int tsize = HARD_REGNO_NREGS (tcol, GET_MODE (sl->t->orig_x));
+ int ssize = hard_regno_nregs[scol][GET_MODE (sl->s->orig_x)];
+ int tsize = hard_regno_nregs[tcol][GET_MODE (sl->t->orig_x)];
int sofs = 0, tofs = 0;
if (SUBWEB_P (sl->t)
&& GET_MODE_SIZE (GET_MODE (sl->t->orig_x)) >= UNITS_PER_WORD)
@@ -2235,8 +2259,7 @@ static void
break_aliases_to_web (struct web *web)
{
struct dlist *d, *d_next;
- if (web->type != SPILLED)
- abort ();
+ gcc_assert (web->type == SPILLED);
for (d = WEBS(COALESCED); d; d = d_next)
{
struct web *other = DLIST_WEB (d);
@@ -2279,10 +2302,9 @@ break_precolored_alias (struct web *web)
struct web *pre = web->alias;
struct conflict_link *wl;
unsigned int c = pre->color;
- unsigned int nregs = HARD_REGNO_NREGS (c, GET_MODE (web->orig_x));
+ unsigned int nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)];
struct dlist *d;
- if (pre->type != PRECOLORED)
- abort ();
+ gcc_assert (pre->type == PRECOLORED);
unalias_web (web);
/* Now we need to look at each conflict X of WEB, if it conflicts
with [PRE, PRE+nregs), and remove such conflicts, of X has not other
@@ -2427,13 +2449,12 @@ restore_conflicts_from_coalesce (struct web *web)
struct conflict_link *owl;
wl = *pcl;
*pcl = wl->next;
- if (!other->have_orig_conflicts && other->type != PRECOLORED)
- abort ();
+ gcc_assert (other->have_orig_conflicts
+ || other->type == PRECOLORED);
for (owl = other->orig_conflict_list; owl; owl = owl->next)
if (owl->t == web)
break;
- if (owl)
- abort ();
+ gcc_assert (!owl);
opcl = &(other->conflict_list);
while (*opcl)
{
@@ -2448,8 +2469,7 @@ restore_conflicts_from_coalesce (struct web *web)
opcl = &((*opcl)->next);
}
}
- if (!owl && other->type != PRECOLORED)
- abort ();
+ gcc_assert (owl || other->type == PRECOLORED);
/*ra_debug_msg (DUMP_COLORIZE, "delete conflict %d - %d\n", web->id,
other->id);*/
/* wl and owl contain the edge data to be deleted. */
@@ -2707,8 +2727,7 @@ non_conflicting_for_combine (struct web *w1, struct web *w2)
&& !TEST_BIT (sup_igraph, w2->id * num_webs + w1->id))
return 1;
/* Don't call us with subwebs. */
- if (w1->parent_web || w2->parent_web)
- abort ();
+ gcc_assert (w1->parent_web == 0 && w2->parent_web == 0);
/* We know that the webs have at least some conflicts. Now check
if we can coalesce them anyway. We can do this if by coalescing
@@ -2751,8 +2770,7 @@ sort_and_combine_web_pairs (int for_move)
sorted = xmalloc (num_web_pairs * sizeof (sorted[0]));
for (p = web_pair_list, i = 0; p; p = p->next_list)
sorted[i++] = p;
- if (i != num_web_pairs)
- abort ();
+ gcc_assert (i == num_web_pairs);
qsort (sorted, num_web_pairs, sizeof (sorted[0]), comp_web_pairs);
/* After combining one pair, we actually should adjust the savings
@@ -2788,6 +2806,39 @@ sort_and_combine_web_pairs (int for_move)
free (sorted);
}
+/* Returns nonzero if source/target reg classes are ok for coalesce. */
+
+static int
+ok_class (struct web *target, struct web *source)
+{
+ /* Don't coalesce if preferred classes are different and at least one
+ of them has a size of 1. This was preventing things such as the
+ branch on count transformation (i.e. DoLoop) since the target, which
+ prefers the CTR, was being coalesced with a source which preferred
+ GENERAL_REGS. If only one web has a preferred class with 1 free reg
+ then set it as the preferred color of the other web. */
+ enum reg_class t_class, s_class;
+ t_class = reg_preferred_class (target->regno);
+ s_class = reg_preferred_class (source->regno);
+ if (t_class != s_class)
+ {
+ if (num_free_regs[t_class] == 1)
+ {
+ if (num_free_regs[s_class] != 1)
+ SET_HARD_REG_BIT (source->prefer_colors,
+ single_reg_in_regclass[t_class]);
+ return 0;
+ }
+ else if (num_free_regs[s_class] == 1)
+ {
+ SET_HARD_REG_BIT (target->prefer_colors,
+ single_reg_in_regclass[s_class]);
+ return 0;
+ }
+ }
+ return 1;
+}
+
/* Greedily coalesce all moves possible. Begin with the web pair
giving the most saving if coalesced. */
@@ -2810,7 +2861,8 @@ aggressive_coalesce (void)
}
if (s != t
&& t->type != PRECOLORED
- && non_conflicting_for_combine (s, t))
+ && non_conflicting_for_combine (s, t)
+ && ok_class (t, s))
{
if ((s->type == PRECOLORED && ok (t, s))
|| s->type != PRECOLORED)
@@ -2888,6 +2940,7 @@ ATTRIBUTE_UNUSED
extended_coalesce (void)
{
rtx insn;
+ bitmap_iterator bi;
bitmap defs = BITMAP_XMALLOC ();
bitmap uses = BITMAP_XMALLOC ();
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
@@ -2918,10 +2971,11 @@ extended_coalesce (void)
if (!num)
continue;
- EXECUTE_IF_SET_IN_BITMAP (defs, 0, i,
+ EXECUTE_IF_SET_IN_BITMAP (defs, 0, i, bi)
{
+ bitmap_iterator bj;
struct web *dest = id2web[i];
- EXECUTE_IF_SET_IN_BITMAP (uses, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (uses, 0, j, bj)
{
if (i != j
&& non_conflicting_for_combine (dest, ID2WEB (j)))
@@ -2934,8 +2988,8 @@ extended_coalesce (void)
goto out;
}
}
- });
- });
+ }
+ }
out:
/* ANSI C is broken. It forbids labels at end of compound statements,
so fake a non-end. */
@@ -3017,6 +3071,7 @@ extended_coalesce_2 (void)
of equal modes. */
&& GET_MODE (source->orig_x) == GET_MODE (dest->orig_x)
&& hard_regs_combinable_p (dest, source)
+ && ok_class (dest, source)
&& non_conflicting_for_combine (source, dest))
add_web_pair_cost (dest, source,
BLOCK_FOR_INSN (insn)->frequency,
@@ -3046,16 +3101,17 @@ check_uncoalesced_moves (void)
s = t;
t = h;
}
- if (s != t
- && m->type != CONSTRAINED
- /* Following can happen when a move was coalesced, but later
- broken up again. Then s!=t, but m is still MV_COALESCED. */
- && m->type != MV_COALESCED
- && t->type != PRECOLORED
- && ((s->type == PRECOLORED && ok (t, s))
- || s->type != PRECOLORED)
- && non_conflicting_for_combine (s, t))
- abort ();
+ gcc_assert (s == t
+ || m->type == CONSTRAINED
+ /* Following can happen when a move was coalesced, but
+ later broken up again. Then s!=t, but m is still
+ MV_COALESCED. */
+ || m->type == MV_COALESCED
+ || t->type == PRECOLORED
+ || (s->type == PRECOLORED && !ok (t, s))
+ || !((s->type == PRECOLORED && ok (t, s))
+ || s->type != PRECOLORED)
+ || !non_conflicting_for_combine (s, t));
}
}
@@ -3066,7 +3122,7 @@ check_uncoalesced_moves (void)
void
ra_colorize_graph (struct df *df)
{
- if (rtl_dump_file)
+ if (dump_file)
dump_igraph (df);
build_worklists (df);
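Note on the new ok_class predicate added above: it refuses to coalesce two webs whose preferred register classes differ when one of those classes has only a single free register, and instead records that lone register as a preferred color of the other web (the DoLoop/CTR case mentioned in its comment). A standalone sketch of the same decision logic, with small invented tables standing in for reg_preferred_class, num_free_regs and single_reg_in_regclass; the class ids and values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

enum reg_class { GENERAL_REGS, CTR_REG, N_REG_CLASSES };

/* Illustrative stand-ins for the GCC tables ok_class consults.  */
static const int num_free_regs[N_REG_CLASSES]          = { 16, 1 };
static const int single_reg_in_regclass[N_REG_CLASSES] = { -1, 31 };

struct web
{
  enum reg_class preferred;      /* stand-in for reg_preferred_class (regno) */
  uint64_t prefer_colors;        /* stand-in for HARD_REG_SET prefer_colors */
};

/* Returns nonzero when coalescing TARGET and SOURCE is acceptable;
   otherwise biases the other web toward the lone free register.  */
static int
ok_class (struct web *target, struct web *source)
{
  enum reg_class t_class = target->preferred;
  enum reg_class s_class = source->preferred;

  if (t_class != s_class)
    {
      if (num_free_regs[t_class] == 1)
        {
          if (num_free_regs[s_class] != 1)
            source->prefer_colors
              |= (uint64_t) 1 << single_reg_in_regclass[t_class];
          return 0;
        }
      else if (num_free_regs[s_class] == 1)
        {
          target->prefer_colors
            |= (uint64_t) 1 << single_reg_in_regclass[s_class];
          return 0;
        }
    }
  return 1;
}

int main (void)
{
  struct web t = { CTR_REG, 0 }, s = { GENERAL_REGS, 0 };
  int can_coalesce = ok_class (&t, &s);
  printf ("coalesce ok? %d, source now prefers reg 31? %d\n",
          can_coalesce, (int) ((s.prefer_colors >> 31) & 1));
  return 0;
}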
diff --git a/gcc/ra-debug.c b/gcc/ra-debug.c
index 92699826a47..6ee7ee4f8bc 100644
--- a/gcc/ra-debug.c
+++ b/gcc/ra-debug.c
@@ -1,5 +1,5 @@
/* Graph coloring register allocator
- Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Matz <matz@suse.de>
and Daniel Berlin <dan@cgsoftware.com>.
@@ -32,6 +32,7 @@
#include "output.h"
#include "ra.h"
#include "tm_p.h"
+#include "regs.h"
/* This file contains various dumping and debug functions for
the graph coloring register allocator. */
@@ -62,8 +63,8 @@ ra_debug_msg (unsigned int level, const char *format, ...)
va_list ap;
va_start (ap, format);
- if ((debug_new_regalloc & level) != 0 && rtl_dump_file != NULL)
- vfprintf (rtl_dump_file, format, ap);
+ if ((debug_new_regalloc & level) != 0 && dump_file != NULL)
+ vfprintf (dump_file, format, ap);
va_end (ap);
}
@@ -138,9 +139,11 @@ ra_print_rtx_2op (FILE *file, rtx x)
case AND: opname = "&"; break;
case IOR: opname = "|"; break;
case XOR: opname = "^"; break;
- /* class '<' */
+ /* class '=' */
case NE: opname = "!="; break;
case EQ: opname = "=="; break;
+ case LTGT: opname = "<>"; break;
+ /* class '<' */
case GE: opname = "s>="; break;
case GT: opname = "s>"; break;
case LE: opname = "s<="; break;
@@ -204,7 +207,7 @@ ra_print_rtx_3op (FILE *file, rtx x)
}
}
-/* Print rtx X, which represents an object (class 'o' or some constructs
+/* Print rtx X, which represents an object (class 'o', 'C', or some constructs
of class 'x' (e.g. subreg)), to FILE.
(reg XX) rtl is represented as "pXX", of XX was a pseudo,
as "name" it name is the nonnull hardreg name, or as "hXX", if XX
@@ -254,7 +257,7 @@ ra_print_rtx_object (FILE *file, rtx x)
int regno = REGNO (x);
if (regno < FIRST_PSEUDO_REGISTER)
{
- int i, nregs = HARD_REGNO_NREGS (regno, mode);
+ int i, nregs = hard_regno_nregs[regno][mode];
if (nregs > 1)
fputs ("[", file);
for (i = 0; i < nregs; i++)
@@ -281,11 +284,11 @@ ra_print_rtx_object (FILE *file, rtx x)
{
rtx sub = SUBREG_REG (x);
int ofs = SUBREG_BYTE (x);
- if (GET_CODE (sub) == REG
+ if (REG_P (sub)
&& REGNO (sub) < FIRST_PSEUDO_REGISTER)
{
int regno = REGNO (sub);
- int i, nregs = HARD_REGNO_NREGS (regno, mode);
+ int i, nregs = hard_regno_nregs[regno][mode];
regno += subreg_regno_offset (regno, GET_MODE (sub),
ofs, mode);
if (nregs > 1)
@@ -327,10 +330,10 @@ ra_print_rtx_object (FILE *file, rtx x)
case LABEL_REF:
{
rtx sub = XEXP (x, 0);
- if (GET_CODE (sub) == NOTE
+ if (NOTE_P (sub)
&& NOTE_LINE_NUMBER (sub) == NOTE_INSN_DELETED_LABEL)
fprintf (file, "(deleted uid=%d)", INSN_UID (sub));
- else if (GET_CODE (sub) == CODE_LABEL)
+ else if (LABEL_P (sub))
fprintf (file, "L%d", CODE_LABEL_NUMBER (sub));
else
fprintf (file, "(nonlabel uid=%d)", INSN_UID (sub));
@@ -352,12 +355,10 @@ void
ra_print_rtx (FILE *file, rtx x, int with_pn)
{
enum rtx_code code;
- char class;
int unhandled = 0;
if (!x)
return;
code = GET_CODE (x);
- class = GET_RTX_CLASS (code);
/* First handle the insn like constructs. */
if (INSN_P (x) || code == NOTE || code == CODE_LABEL || code == BARRIER)
@@ -403,9 +404,11 @@ ra_print_rtx (FILE *file, rtx x, int with_pn)
fprintf (file, " %s", GET_NOTE_INSN_NAME (ln));
else
{
- fprintf (file, " line %d", ln);
- if (NOTE_SOURCE_FILE (x))
- fprintf (file, ":%s", NOTE_SOURCE_FILE (x));
+ expanded_location s;
+ NOTE_EXPANDED_LOCATION (s, x);
+ fprintf (file, " line %d", s.line);
+ if (s.file != NULL)
+ fprintf (file, ":%s", s.file);
}
}
else
@@ -506,16 +509,29 @@ ra_print_rtx (FILE *file, rtx x, int with_pn)
}
if (!unhandled)
return;
- if (class == '1')
- ra_print_rtx_1op (file, x);
- else if (class == '2' || class == 'c' || class == '<')
- ra_print_rtx_2op (file, x);
- else if (class == '3' || class == 'b')
- ra_print_rtx_3op (file, x);
- else if (class == 'o')
- ra_print_rtx_object (file, x);
- else
- print_inline_rtx (file, x, 0);
+ switch (GET_RTX_CLASS (code))
+ {
+ case RTX_UNARY:
+ ra_print_rtx_1op (file, x);
+ break;
+ case RTX_BIN_ARITH:
+ case RTX_COMM_ARITH:
+ case RTX_COMPARE:
+ case RTX_COMM_COMPARE:
+ ra_print_rtx_2op (file, x);
+ break;
+ case RTX_TERNARY:
+ case RTX_BITFIELD_OPS:
+ ra_print_rtx_3op (file, x);
+ break;
+ case RTX_OBJ:
+ case RTX_CONST_OBJ:
+ ra_print_rtx_object (file, x);
+ break;
+ default:
+ print_inline_rtx (file, x, 0);
+ break;
+ }
}
/* This only calls ra_print_rtx(), but emits a final newline. */
@@ -543,11 +559,12 @@ ra_debug_bbi (int bbi)
{
basic_block bb = BASIC_BLOCK (bbi);
rtx insn;
- for (insn = bb->head; insn; insn = NEXT_INSN (insn))
+ for (insn = BB_HEAD (bb); insn; insn = NEXT_INSN (insn))
{
- ra_print_rtx_top (stderr, insn, (insn == bb->head || insn == bb->end));
+ ra_print_rtx_top (stderr, insn,
+ (insn == BB_HEAD (bb) || insn == BB_END (bb)));
fprintf (stderr, "\n");
- if (insn == bb->end)
+ if (insn == BB_END (bb))
break;
}
}
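Note on the ra_debug_bbi hunk above: like many hunks in ra-rewrite.c further down, it replaces direct accesses to bb->head and bb->end with the BB_HEAD and BB_END accessor macros. A toy sketch of that accessor pattern; the struct layout here is invented and is not GCC's basic_block_def:

#include <stdio.h>

struct insn { int uid; };

/* A toy basic block, for illustration only.  */
struct basic_block_def
{
  struct insn *head;
  struct insn *end;
};

/* Accessors in the spirit of BB_HEAD/BB_END: call sites stop writing
   bb->head and bb->end directly, so the field layout can later change
   without touching every user.  */
#define BB_HEAD(BB) ((BB)->head)
#define BB_END(BB)  ((BB)->end)

int main (void)
{
  struct insn first = { 1 }, last = { 9 };
  struct basic_block_def block = { &first, &last };

  printf ("head uid %d, end uid %d\n",
          BB_HEAD (&block)->uid, BB_END (&block)->uid);
  return 0;
}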
@@ -564,7 +581,7 @@ ra_debug_insns (rtx insn, int num)
insn = PREV_INSN (insn);
for (i = count; i > 0 && insn; insn = NEXT_INSN (insn), i--)
{
- if (GET_CODE (insn) == CODE_LABEL)
+ if (LABEL_P (insn))
fprintf (stderr, "\n");
ra_print_rtx_top (stderr, insn, (i == count || i == 1));
}
@@ -584,7 +601,7 @@ ra_print_rtl_with_bb (FILE *file, rtx insn)
last_bb = NULL;
for (; insn; insn = NEXT_INSN (insn))
{
- if (GET_CODE (insn) == BARRIER)
+ if (BARRIER_P (insn))
bb = NULL;
else
bb = BLOCK_FOR_INSN (insn);
@@ -596,9 +613,9 @@ ra_print_rtl_with_bb (FILE *file, rtx insn)
fprintf (file, ";; Begin of basic block %d\n", bb->index);
last_bb = bb;
}
- if (GET_CODE (insn) == CODE_LABEL)
+ if (LABEL_P (insn))
fputc ('\n', file);
- if (GET_CODE (insn) == NOTE)
+ if (NOTE_P (insn))
{
/* Ignore basic block and maybe other notes not referencing
deleted things. */
@@ -656,7 +673,7 @@ dump_igraph (struct df *df ATTRIBUTE_UNUSED)
int num = 0;
int num2;
unsigned int i;
- if (!rtl_dump_file || (debug_new_regalloc & (DUMP_IGRAPH | DUMP_WEBS)) == 0)
+ if (!dump_file || (debug_new_regalloc & (DUMP_IGRAPH | DUMP_WEBS)) == 0)
return;
ra_debug_msg (DUMP_IGRAPH, "conflicts:\n ");
for (def1 = 0; def1 < num_webs; def1++)
@@ -748,7 +765,7 @@ dump_igraph_machine (void)
{
unsigned int i;
- if (!rtl_dump_file || (debug_new_regalloc & DUMP_IGRAPH_M) == 0)
+ if (!dump_file || (debug_new_regalloc & DUMP_IGRAPH_M) == 0)
return;
ra_debug_msg (DUMP_IGRAPH_M, "g %d %d\n", num_webs - num_subwebs,
FIRST_PSEUDO_REGISTER);
@@ -808,10 +825,10 @@ dump_constraints (void)
{
rtx insn;
int i;
- if (!rtl_dump_file || (debug_new_regalloc & DUMP_CONSTRAINTS) == 0)
+ if (!dump_file || (debug_new_regalloc & DUMP_CONSTRAINTS) == 0)
return;
for (i = FIRST_PSEUDO_REGISTER; i < ra_max_regno; i++)
- if (regno_reg_rtx[i] && GET_CODE (regno_reg_rtx[i]) == REG)
+ if (regno_reg_rtx[i] && REG_P (regno_reg_rtx[i]))
REGNO (regno_reg_rtx[i])
= ra_reg_renumber[i] >= 0 ? ra_reg_renumber[i] : i;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
@@ -849,7 +866,7 @@ dump_constraints (void)
ra_debug_msg (DUMP_CONSTRAINTS, "\n");
}
for (i = FIRST_PSEUDO_REGISTER; i < ra_max_regno; i++)
- if (regno_reg_rtx[i] && GET_CODE (regno_reg_rtx[i]) == REG)
+ if (regno_reg_rtx[i] && REG_P (regno_reg_rtx[i]))
REGNO (regno_reg_rtx[i]) = i;
}
@@ -861,7 +878,7 @@ dump_graph_cost (unsigned int level, const char *msg)
{
unsigned int i;
unsigned HOST_WIDE_INT cost;
- if (!rtl_dump_file || (debug_new_regalloc & level) == 0)
+ if (!dump_file || (debug_new_regalloc & level) == 0)
return;
cost = 0;
@@ -883,7 +900,7 @@ dump_ra (struct df *df ATTRIBUTE_UNUSED)
{
struct web *web;
struct dlist *d;
- if (!rtl_dump_file || (debug_new_regalloc & DUMP_RESULTS) == 0)
+ if (!dump_file || (debug_new_regalloc & DUMP_RESULTS) == 0)
return;
ra_debug_msg (DUMP_RESULTS, "\nColored:\n");
@@ -935,7 +952,7 @@ dump_static_insn_cost (FILE *file, const char *message, const char *prefix)
{
unsigned HOST_WIDE_INT block_cost = bb->frequency;
rtx insn, set;
- for (insn = bb->head; insn; insn = NEXT_INSN (insn))
+ for (insn = BB_HEAD (bb); insn; insn = NEXT_INSN (insn))
{
/* Yes, yes. We don't calculate the costs precisely.
Only for "simple enough" insns. Those containing single
@@ -950,10 +967,11 @@ dump_static_insn_cost (FILE *file, const char *message, const char *prefix)
if (rtx_equal_p (src, dest))
pcost = &selfcopy;
else if (GET_CODE (src) == GET_CODE (dest)
- && ((GET_CODE (src) == REG)
+ && ((REG_P (src))
|| (GET_CODE (src) == SUBREG
- && GET_CODE (SUBREG_REG (src)) == REG
- && GET_CODE (SUBREG_REG (dest)) == REG)))
+ && REG_P (SUBREG_REG (src))
+ && REG_P (SUBREG_REG (dest)))))
+ /* XXX is dest guaranteed to be a subreg? */
pcost = &regcopy;
else
{
@@ -961,10 +979,10 @@ dump_static_insn_cost (FILE *file, const char *message, const char *prefix)
src = SUBREG_REG (src);
if (GET_CODE (dest) == SUBREG)
dest = SUBREG_REG (dest);
- if (GET_CODE (src) == MEM && GET_CODE (dest) != MEM
+ if (MEM_P (src) && !MEM_P (dest)
&& memref_is_stack_slot (src))
pcost = &load;
- else if (GET_CODE (src) != MEM && GET_CODE (dest) == MEM
+ else if (!MEM_P (src) && MEM_P (dest)
&& memref_is_stack_slot (dest))
pcost = &store;
}
@@ -974,7 +992,7 @@ dump_static_insn_cost (FILE *file, const char *message, const char *prefix)
pcost->count++;
}
}
- if (insn == bb->end)
+ if (insn == BB_END (bb))
break;
}
}
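Note on a change that recurs in ra-build.c, ra-colorize.c and the ra-rewrite.c hunks below: EXECUTE_IF_SET_IN_BITMAP moves from the old statement-macro form, which received the loop body as a macro argument and ended in "});", to an iterator form that takes a declared bitmap_iterator and is followed by an ordinary block. The toy macros below are not GCC's bitmap.h; they only sketch, over a single 64-bit word, why the iterator form reads like a normal for loop:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t word; unsigned bit; } bit_iterator;

/* Old style: the body is passed in as a macro argument.  */
#define EXECUTE_IF_SET_OLD(WORD, IDX, BODY)                 \
  do {                                                      \
    for ((IDX) = 0; (IDX) < 64; (IDX)++)                    \
      if ((WORD) & ((uint64_t) 1 << (IDX)))                 \
        BODY                                                \
  } while (0)

static int next_set_bit (bit_iterator *it, unsigned *idx)
{
  while (it->bit < 64 && !(it->word & ((uint64_t) 1 << it->bit)))
    it->bit++;
  if (it->bit >= 64)
    return 0;
  *idx = it->bit;
  return 1;
}

/* New style: an iterator object plus a for-header macro; the body
   follows as a normal compound statement.  */
#define EXECUTE_IF_SET_NEW(WORD, IDX, ITER)                 \
  for ((ITER).word = (WORD), (ITER).bit = 0, (IDX) = 0;     \
       next_set_bit (&(ITER), &(IDX));                      \
       (ITER).bit++)

int main (void)
{
  uint64_t live = 0x15;   /* bits 0, 2 and 4 set */
  unsigned i;
  bit_iterator bi;

  EXECUTE_IF_SET_OLD (live, i, { printf ("old: %u\n", i); });

  EXECUTE_IF_SET_NEW (live, i, bi)
    {
      printf ("new: %u\n", i);
    }
  return 0;
}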
diff --git a/gcc/ra-rewrite.c b/gcc/ra-rewrite.c
index 02e84113a1f..87b1edd29a9 100644
--- a/gcc/ra-rewrite.c
+++ b/gcc/ra-rewrite.c
@@ -1,5 +1,5 @@
/* Graph coloring register allocator
- Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Contributed by Michael Matz <matz@suse.de>
and Daniel Berlin <dan@cgsoftware.com>.
@@ -140,8 +140,8 @@ spill_coalescing (sbitmap coalesce, sbitmap spilled)
T from the web which was coalesced into T, which at the time
of combine() were not already on the SELECT stack or were
itself coalesced to something other. */
- if (t->type != SPILLED || s->type != SPILLED)
- abort ();
+ gcc_assert (t->type == SPILLED
+ && s->type == SPILLED);
remove_list (t->dlink, &WEBS(SPILLED));
put_web (t, COALESCED);
t->alias = s;
@@ -353,7 +353,6 @@ allocate_spill_web (struct web *web)
alloc_slot:
slot = assign_stack_local (PSEUDO_REGNO_MODE (regno), total_size,
inherent_size == total_size ? 0 : -1);
- RTX_UNCHANGING_P (slot) = RTX_UNCHANGING_P (regno_reg_rtx[regno]);
set_mem_alias_set (slot, new_alias_set ());
web->stack_slot = slot;
*/
@@ -397,7 +396,7 @@ choose_spill_colors (void)
&& HARD_REGNO_MODE_OK (c, PSEUDO_REGNO_MODE (web->regno)))
{
int i, size;
- size = HARD_REGNO_NREGS (c, PSEUDO_REGNO_MODE (web->regno));
+ size = hard_regno_nregs[c][PSEUDO_REGNO_MODE (web->regno)];
for (i = 1; i < size
&& TEST_HARD_REG_BIT (avail, c + i); i++);
if (i == size)
@@ -493,8 +492,8 @@ rewrite_program (bitmap new_deaths)
end_sequence ();
emit_insn_before (insns, insn);
- if (bb->head == insn)
- bb->head = NEXT_INSN (prev);
+ if (BB_HEAD (bb) == insn)
+ BB_HEAD (bb) = NEXT_INSN (prev);
for (insn = PREV_INSN (insn); insn != prev;
insn = PREV_INSN (insn))
{
@@ -543,8 +542,8 @@ rewrite_program (bitmap new_deaths)
if (insns)
{
emit_insn_after (insns, insn);
- if (bb->end == insn)
- bb->end = PREV_INSN (following);
+ if (BB_END (bb) == insn)
+ BB_END (bb) = PREV_INSN (following);
for (insn = insns; insn != following;
insn = NEXT_INSN (insn))
{
@@ -619,7 +618,7 @@ slots_overlap_p (rtx s1, rtx s2)
if (GET_CODE (s1) != GET_CODE (s2))
return 0;
- if (GET_CODE (s1) == REG && GET_CODE (s2) == REG)
+ if (REG_P (s1) && REG_P (s2))
{
if (REGNO (s1) != REGNO (s2))
return 0;
@@ -627,14 +626,13 @@ slots_overlap_p (rtx s1, rtx s2)
return 0;
return 1;
}
- if (GET_CODE (s1) != MEM || GET_CODE (s2) != MEM)
- abort ();
+ gcc_assert (MEM_P (s1) && GET_CODE (s2) == MEM);
s1 = XEXP (s1, 0);
s2 = XEXP (s2, 0);
- if (GET_CODE (s1) != PLUS || GET_CODE (XEXP (s1, 0)) != REG
+ if (GET_CODE (s1) != PLUS || !REG_P (XEXP (s1, 0))
|| GET_CODE (XEXP (s1, 1)) != CONST_INT)
return 1;
- if (GET_CODE (s2) != PLUS || GET_CODE (XEXP (s2, 0)) != REG
+ if (GET_CODE (s2) != PLUS || !REG_P (XEXP (s2, 0))
|| GET_CODE (XEXP (s2, 1)) != CONST_INT)
return 1;
base1 = XEXP (s1, 0);
@@ -725,7 +723,7 @@ insert_stores (bitmap new_deaths)
/* If we reach a basic block border, which has more than one
outgoing edge, we simply forget all already emitted stores. */
- if (GET_CODE (insn) == BARRIER
+ if (BARRIER_P (insn)
|| JUMP_P (insn) || can_throw_internal (insn))
{
last_slot = NULL_RTX;
@@ -840,8 +838,8 @@ insert_stores (bitmap new_deaths)
if (insns)
{
emit_insn_after (insns, insn);
- if (bb->end == insn)
- bb->end = PREV_INSN (following);
+ if (BB_END (bb) == insn)
+ BB_END (bb) = PREV_INSN (following);
for (ni = insns; ni != following; ni = NEXT_INSN (ni))
{
set_block_for_insn (ni, bb);
@@ -910,9 +908,9 @@ insert_stores (bitmap new_deaths)
{
/* rtx d = SET_DEST (set); */
note_uses_partial (&set, delete_overlapping_uses, (void *)&slots);
- /*if (1 || GET_CODE (SET_SRC (set)) == MEM)
+ /*if (1 || MEM_P (SET_SRC (set)))
delete_overlapping_slots (&slots, SET_SRC (set));*/
- /*if (REG_P (d) || GET_CODE (d) == MEM
+ /*if (REG_P (d) || MEM_P (d)
|| (GET_CODE (d) == SUBREG && REG_P (SUBREG_REG (d))))
remember_slot (&slots, d);*/
}
@@ -933,9 +931,9 @@ spill_same_color_p (struct web *web1, struct web *web2)
return 0;
size1 = web1->type == PRECOLORED
- ? 1 : HARD_REGNO_NREGS (c1, PSEUDO_REGNO_MODE (web1->regno));
+ ? 1 : hard_regno_nregs[c1][PSEUDO_REGNO_MODE (web1->regno)];
size2 = web2->type == PRECOLORED
- ? 1 : HARD_REGNO_NREGS (c2, PSEUDO_REGNO_MODE (web2->regno));
+ ? 1 : hard_regno_nregs[c2][PSEUDO_REGNO_MODE (web2->regno)];
if (c1 >= c2 + size2 || c2 >= c1 + size1)
return 0;
return 1;
@@ -970,7 +968,7 @@ update_spill_colors (HARD_REG_SET *in_use, struct web *web, int add)
if ((c = alias (find_web_for_subweb (web))->color) < 0
|| c == an_unusable_color)
return;
- size = HARD_REGNO_NREGS (c, GET_MODE (web->orig_x));
+ size = hard_regno_nregs[c][GET_MODE (web->orig_x)];
if (SUBWEB_P (web))
{
c += subreg_regno_offset (c, GET_MODE (SUBREG_REG (web->orig_x)),
@@ -1001,7 +999,7 @@ spill_is_free (HARD_REG_SET *in_use, struct web *web)
if (c == an_unusable_color)
return 1;
size = web->type == PRECOLORED
- ? 1 : HARD_REGNO_NREGS (c, PSEUDO_REGNO_MODE (web->regno));
+ ? 1 : hard_regno_nregs[c][PSEUDO_REGNO_MODE (web->regno)];
for (; size--;)
if (TEST_HARD_REG_BIT (*in_use, c + size))
return 0;
@@ -1067,8 +1065,7 @@ emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn)
if (!web)
continue;
supweb = find_web_for_subweb (web);
- if (SPILL_SLOT_P (supweb->regno))
- abort ();
+ gcc_assert (!SPILL_SLOT_P (supweb->regno));
/* Check for web being a spilltemp, if we only want to
load spilltemps. Also remember, that we emitted that
load, which we don't need to do when we have a death,
@@ -1095,8 +1092,7 @@ emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn)
(at least then disallow spilling them, which we already ensure
when flag_ra_break_aliases), or not take the pattern but a
stackslot. */
- if (aweb != supweb)
- abort ();
+ gcc_assert (aweb == supweb);
slot = copy_rtx (supweb->pattern);
innermode = GET_MODE (supweb->orig_x);
}
@@ -1218,8 +1214,8 @@ emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn)
rtx foll = NEXT_INSN (after);
bb = BLOCK_FOR_INSN (after);
emit_insn_after (ni, after);
- if (bb->end == after)
- bb->end = PREV_INSN (foll);
+ if (BB_END (bb) == after)
+ BB_END (bb) = PREV_INSN (foll);
for (ni = NEXT_INSN (after); ni != foll; ni = NEXT_INSN (ni))
{
set_block_for_insn (ni, bb);
@@ -1233,8 +1229,8 @@ emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn)
rtx prev = PREV_INSN (before);
bb = BLOCK_FOR_INSN (before);
emit_insn_before (ni, before);
- if (bb->head == before)
- bb->head = NEXT_INSN (prev);
+ if (BB_HEAD (bb) == before)
+ BB_HEAD (bb) = NEXT_INSN (prev);
for (; ni != before; ni = NEXT_INSN (ni))
{
set_block_for_insn (ni, bb);
@@ -1258,9 +1254,7 @@ emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn)
/* Test LIVE for partial WEB live. */
int
-is_partly_dead (live, web)
- sbitmap live;
- struct web *web;
+is_partly_dead (sbitmap live, struct web *web)
{
struct web *sweb;
@@ -1276,9 +1270,7 @@ is_partly_dead (live, web)
/* Set live bit in LIVE for WEB or all his subwebs. */
void
-set_web_live (live, web)
- sbitmap live;
- struct web *web;
+set_web_live (sbitmap live, struct web *web)
{
struct web *sweb;
@@ -1291,9 +1283,7 @@ set_web_live (live, web)
/* Reset live bit in LIVE for WEB or all his subwebs. */
void
-reset_web_live (live, web)
- sbitmap live;
- struct web *web;
+reset_web_live (sbitmap live, struct web *web)
{
struct web *sweb;
@@ -1336,13 +1326,14 @@ reloads_to_loads (struct rewrite_info *ri, struct ref **refs,
if (is_partly_dead (ri->live, web))
{
int old_num_r = num_reloads;
+ bitmap_iterator bi;
+
bitmap_clear (ri->scratch);
- EXECUTE_IF_SET_IN_BITMAP (ri->need_reload, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (ri->need_reload, 0, j, bi)
{
struct web *web2 = ID2WEB (j);
struct web *aweb2 = alias (find_web_for_subweb (web2));
- if (spill_is_free (&(ri->colors_in_use), aweb2) == 0)
- abort ();
+ gcc_assert (spill_is_free (&(ri->colors_in_use), aweb2) != 0);
if (spill_same_color_p (supweb, aweb2)
/* && interfere (web, web2) */)
{
@@ -1354,7 +1345,7 @@ reloads_to_loads (struct rewrite_info *ri, struct ref **refs,
bitmap_set_bit (ri->scratch, j);
num_reloads--;
}
- });
+ }
if (num_reloads != old_num_r)
bitmap_operation (ri->need_reload, ri->need_reload, ri->scratch,
BITMAP_AND_COMPL);
@@ -1386,6 +1377,8 @@ rewrite_program2 (bitmap new_deaths)
basic_block last_bb = NULL;
rtx last_block_insn;
int i, j;
+ bitmap_iterator bi;
+
if (!INSN_P (insn))
insn = prev_real_insn (insn);
while (insn && !(bb = BLOCK_FOR_INSN (insn)))
@@ -1397,7 +1390,7 @@ rewrite_program2 (bitmap new_deaths)
sbitmap_zero (ri.live);
CLEAR_HARD_REG_SET (ri.colors_in_use);
- EXECUTE_IF_SET_IN_BITMAP (live_at_end[i - 2], 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (live_at_end[i - 2], 0, j, bi)
{
struct web *web = use2web[j];
struct web *aweb = alias (find_web_for_subweb (web));
@@ -1414,7 +1407,7 @@ rewrite_program2 (bitmap new_deaths)
if (aweb->type != SPILLED)
update_spill_colors (&(ri.colors_in_use), web, 1);
}
- });
+ }
bitmap_clear (ri.need_reload);
ri.num_reloads = 0;
@@ -1450,10 +1443,15 @@ rewrite_program2 (bitmap new_deaths)
unsigned int n;
HARD_REG_SET earlyclobber_colors;
+ /* XXX only needed to avoid warning. Should be avoided. */
+ memset (&info, 0, sizeof info);
+
if (INSN_P (insn) && BLOCK_FOR_INSN (insn) != last_bb)
{
int index = BLOCK_FOR_INSN (insn)->index + 2;
- EXECUTE_IF_SET_IN_BITMAP (live_at_end[index - 2], 0, j,
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (live_at_end[index - 2], 0, j, bi)
{
struct web *web = use2web[j];
struct web *aweb = alias (find_web_for_subweb (web));
@@ -1462,9 +1460,9 @@ rewrite_program2 (bitmap new_deaths)
set_web_live (ri.live, web);
update_spill_colors (&(ri.colors_in_use), web, 1);
}
- });
+ }
bitmap_clear (ri.scratch);
- EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j, bi)
{
struct web *web2 = ID2WEB (j);
struct web *supweb2 = find_web_for_subweb (web2);
@@ -1479,7 +1477,7 @@ rewrite_program2 (bitmap new_deaths)
bitmap_set_bit (ri.scratch, j);
ri.num_reloads--;
}
- });
+ }
bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch,
BITMAP_AND_COMPL);
last_bb = BLOCK_FOR_INSN (insn);
@@ -1577,7 +1575,7 @@ rewrite_program2 (bitmap new_deaths)
XXX Note, that sometimes reload barfs when we emit insns between
a call and the insn which copies the return register into a
pseudo. */
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
ri.need_load = 1;
else if (INSN_P (insn))
for (n = 0; n < info.num_uses; n++)
@@ -1672,7 +1670,7 @@ rewrite_program2 (bitmap new_deaths)
/* Now that the effect of this insn are all handled the colors
of early clobber operand are free. */
AND_COMPL_HARD_REG_SET (ri.colors_in_use, earlyclobber_colors);
- if (GET_CODE (insn) == CODE_LABEL)
+ if (LABEL_P (insn))
break;
}
@@ -1682,26 +1680,33 @@ rewrite_program2 (bitmap new_deaths)
int in_ir = 0;
edge e;
int num = 0;
+ edge_iterator ei;
+ bitmap_iterator bi;
+
HARD_REG_SET cum_colors, colors;
CLEAR_HARD_REG_SET (cum_colors);
- for (e = bb->pred; e && num < 5; e = e->pred_next, num++)
+ FOR_EACH_EDGE (e, ei, bb->preds)
{
int j;
+
+ if (num >= 5)
+ break;
CLEAR_HARD_REG_SET (colors);
- EXECUTE_IF_SET_IN_BITMAP (live_at_end[e->src->index], 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (live_at_end[e->src->index], 0, j, bi)
{
struct web *web = use2web[j];
struct web *aweb = alias (find_web_for_subweb (web));
if (aweb->type != SPILLED)
update_spill_colors (&colors, web, 1);
- });
+ }
IOR_HARD_REG_SET (cum_colors, colors);
+ num++;
}
if (num == 5)
in_ir = 1;
bitmap_clear (ri.scratch);
- EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j, bi)
{
struct web *web2 = ID2WEB (j);
struct web *supweb2 = find_web_for_subweb (web2);
@@ -1722,15 +1727,14 @@ rewrite_program2 (bitmap new_deaths)
bitmap_set_bit (ri.scratch, j);
ri.num_reloads--;
}
- });
+ }
bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch,
BITMAP_AND_COMPL);
}
ri.need_load = 1;
emit_loads (&ri, nl_first_reload, last_block_insn);
- if (ri.nl_size != 0 /*|| ri.num_reloads != 0*/)
- abort ();
+ gcc_assert (ri.nl_size == 0);
if (!insn)
break;
}
@@ -1745,10 +1749,8 @@ rewrite_program2 (bitmap new_deaths)
Layout of webs isn't changed they are only mentioned in changed
insns. */
static void
-mark_insn_refs_for_checking (info, already_webs, uses_as_bitmap)
- struct ra_insn_info *info;
- sbitmap already_webs;
- bitmap uses_as_bitmap;
+mark_insn_refs_for_checking (struct ra_insn_info *info, sbitmap already_webs,
+ bitmap uses_as_bitmap)
{
int i, n;
int num_refs;
@@ -1825,6 +1827,7 @@ detect_web_parts_to_rebuild (void)
unsigned int i, pass;
int uid;
struct dlist *d;
+ bitmap_iterator bi;
sbitmap already_webs = sbitmap_alloc (num_webs);
uses_as_bitmap = BITMAP_XMALLOC ();
@@ -1836,7 +1839,7 @@ detect_web_parts_to_rebuild (void)
/* One of our callers isn't allocating split_webs. */
if (split_webs)
- EXECUTE_IF_SET_IN_BITMAP (split_webs, 0, i,
+ EXECUTE_IF_SET_IN_BITMAP (split_webs, 0, i, bi)
{
struct web *web = ID2WEB (i);
if (web->type != SPILLED)
@@ -1844,9 +1847,9 @@ detect_web_parts_to_rebuild (void)
remove_web_from_list (web);
put_web (web, SPILLED);
}
- });
+ }
if (webs_changed_layout)
- EXECUTE_IF_SET_IN_BITMAP (webs_changed_layout, 0, i,
+ EXECUTE_IF_SET_IN_BITMAP (webs_changed_layout, 0, i, bi)
{
struct web *web = alias (ID2WEB (i));
if (web->type != PRECOLORED && web->type != SPILLED)
@@ -1854,7 +1857,7 @@ detect_web_parts_to_rebuild (void)
remove_web_from_list (web);
put_web (web, SPILLED);
}
- });
+ }
/* We generally want to handle all webs whose layout changed, plus the webs
which conflicted with them (for those we only need to recheck their
@@ -1890,6 +1893,7 @@ detect_web_parts_to_rebuild (void)
struct web *web = DLIST_WEB (d);
struct conflict_link *wl;
unsigned int j;
+
/* This check is only needed for coalesced nodes, but hey. */
#if 1
if (alias (web)->type != SPILLED)
@@ -1941,7 +1945,7 @@ detect_web_parts_to_rebuild (void)
if (0 && !web->changed && alias (wl->t)->type != SPILLED)
wl->t->changed = 0;
}
- EXECUTE_IF_SET_IN_BITMAP (web->useless_conflicts, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (web->useless_conflicts, 0, j, bi)
{
struct web *web2 = ID2WEB (j);
if (TEST_BIT (already_webs, web2->id))
@@ -1950,23 +1954,22 @@ detect_web_parts_to_rebuild (void)
mark_refs_for_checking (web2, uses_as_bitmap);
if (0 && !web->changed && alias (web2)->type != SPILLED)
web2->changed = 0;
- });
-
+ }
}
- EXECUTE_IF_SET_IN_BITMAP (last_changed_insns, 0, uid,
- {
- if (uid < insn_df_max_uid)
- mark_insn_refs_for_checking (&insn_df[uid], already_webs,
- uses_as_bitmap);
- });
+ EXECUTE_IF_SET_IN_BITMAP (last_changed_insns, 0, uid, bi)
+ {
+ if (uid < insn_df_max_uid)
+ mark_insn_refs_for_checking (&insn_df[uid], already_webs,
+ uses_as_bitmap);
+ };
/* We also recheck unconditionally all uses of any hardregs. This means
we _can_ delete all these uses from the live_at_end[] bitmaps.
And because we sometimes delete insn referring to hardregs (when
they became useless because they setup a rematerializable pseudo, which
then was rematerialized), some of those uses will go away with the next
- df_analyse(). This means we even _must_ delete those uses from
+ df_analyze(). This means we even _must_ delete those uses from
the live_at_end[] bitmaps. For simplicity we simply delete
all of them. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
@@ -1987,10 +1990,10 @@ detect_web_parts_to_rebuild (void)
BITMAP_AND_COMPL);
live_at_end += 2;
- if (rtl_dump_file && (debug_new_regalloc & DUMP_REBUILD) != 0)
+ if (dump_file && (debug_new_regalloc & DUMP_REBUILD) != 0)
{
ra_debug_msg (DUMP_REBUILD, "need to check these uses:\n");
- dump_sbitmap_file (rtl_dump_file, last_check_uses);
+ dump_sbitmap_file (dump_file, last_check_uses);
}
sbitmap_free (already_webs);
BITMAP_XFREE (uses_as_bitmap);
@@ -2002,11 +2005,10 @@ static unsigned HOST_WIDE_INT deleted_def_cost;
extern int flag_non_call_exceptions;
-static void try_delete_useless_def PARAMS ((rtx, rtx));
+static void try_delete_useless_def (rtx, rtx);
static void
-try_delete_useless_def (insn, set)
- rtx insn, set;
+try_delete_useless_def (rtx insn, rtx set)
{
unsigned int n;
rtx dest = SET_DEST (set);
@@ -2057,10 +2059,12 @@ static void
delete_useless_defs (void)
{
unsigned int i;
+ bitmap_iterator bi;
+
/* If the insn only sets the def without any sideeffect (besides
clobbers or uses), we can delete it. single_set() also tests
for INSN_P(insn). */
- EXECUTE_IF_SET_IN_BITMAP (useless_defs, 0, i,
+ EXECUTE_IF_SET_IN_BITMAP (useless_defs, 0, i, bi)
{
rtx insn = DF_REF_INSN (df->defs[i]);
rtx set = single_set (insn);
@@ -2070,7 +2074,7 @@ delete_useless_defs (void)
{
try_delete_useless_def (insn, set);
}
- });
+ }
}
/* Look for spilled webs, on whose behalf no insns were emitted.
@@ -2109,9 +2113,9 @@ detect_non_changed_webs (void)
}
}
-static int need_rebuild PARAMS ((void));
+static int need_rebuild (void);
static int
-need_rebuild ()
+need_rebuild (void)
{
struct dlist *d;
for (d = WEBS(SPILLED); d; d = d->next)
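Note on the hunk above: like several other hunks in this file (try_delete_useless_def, subst_to_stack_p, allocate_stack_slots), it drops old-style definitions and PARAMS prototypes in favour of plain ISO C prototypes. A minimal before/after sketch with a made-up function; PARAMS came from ansidecl.h and expanded to its argument list on ISO compilers and to an empty list otherwise:

#include <stdio.h>

/* Old form, as removed by this patch (shown here only in a comment):

     static int add_one PARAMS ((int));

     static int
     add_one (x)
          int x;
     {
       return x + 1;
     }
*/

/* ISO C prototype and definition, as used after the conversion.  */
static int add_one (int);

static int
add_one (int x)
{
  return x + 1;
}

int main (void)
{
  printf ("%d\n", add_one (41));
  return 0;
}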
@@ -2140,7 +2144,7 @@ reset_changed_flag (void)
IMHO(denisc@overta.ru): This check must be supported in different
manner. */
int
-subst_to_stack_p ()
+subst_to_stack_p (void)
{
struct dlist *d;
for (d = WEBS(COLORED); d; d = d->next)
@@ -2173,6 +2177,7 @@ actual_spill (int spill_p ATTRIBUTE_UNUSED)
{
int i;
int rebuildit = 1;
+ bitmap_iterator bi;
bitmap new_deaths;
/* If we have a webs colored by an_unusable_color (ie we think that they are
@@ -2210,8 +2215,8 @@ actual_spill (int spill_p ATTRIBUTE_UNUSED)
insns_with_deaths = sbitmap_alloc (get_max_uid ());
death_insns_max_uid = get_max_uid ();
sbitmap_zero (insns_with_deaths);
- EXECUTE_IF_SET_IN_BITMAP (new_deaths, 0, i,
- { SET_BIT (insns_with_deaths, i);});
+ EXECUTE_IF_SET_IN_BITMAP (new_deaths, 0, i, bi)
+ SET_BIT (insns_with_deaths, i);
if (ra_pass > 1)
{
rebuildit = need_rebuild ();
@@ -2228,13 +2233,14 @@ actual_spill (int spill_p ATTRIBUTE_UNUSED)
static void allocate_stack_slots PARAMS ((void));
static void
-allocate_stack_slots ()
+allocate_stack_slots (void)
{
unsigned int *stack_color, *max_size, *need_align;
rtx *slots;
unsigned int max_color;
unsigned int i, max_num;
bitmap conflicts = BITMAP_XMALLOC ();
+ bitmap_iterator bi;
if (BYTES_BIG_ENDIAN)
abort();
@@ -2256,11 +2262,11 @@ allocate_stack_slots ()
for (wl = web->conflict_list; wl; wl = wl->next)
if (stack_color[wl->t->id])
bitmap_set_bit (conflicts, stack_color[wl->t->id]);
- EXECUTE_IF_SET_IN_BITMAP (web->useless_conflicts, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (web->useless_conflicts, 0, j, bi)
{
if (stack_color[j])
bitmap_set_bit (conflicts, stack_color[j]);
- });
+ }
for (this_color = 1; bitmap_bit_p (conflicts, this_color);
this_color++) ;
stack_color[i] = this_color;
@@ -2282,7 +2288,6 @@ allocate_stack_slots ()
rtx place;
mode = mode_for_size (max_size[i] * BITS_PER_UNIT, MODE_INT, 1);
place = assign_stack_local (mode, max_size[i], need_align[i] ? -1 : 0);
- /* XXX do something with RTX_UNCHANGING_P ? */
set_mem_alias_set (place, new_alias_set ());
slots[i] = place;
}
@@ -2313,7 +2318,7 @@ allocate_stack_slots ()
/* Remove all REG_EQUIV notes found in the insn chain. */
static void
-purge_reg_equiv_notes ()
+purge_reg_equiv_notes (void)
{
rtx insn, note;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
@@ -2326,7 +2331,7 @@ purge_reg_equiv_notes ()
Replace all uses and defs to stack slots in all possible cases. */
static void
-assign_stack_slots ()
+assign_stack_slots (void)
{
int i;
struct dlist *d, *d_next;
@@ -2354,7 +2359,7 @@ assign_stack_slots ()
insns_with_deaths = sbitmap_alloc (get_max_uid ());
sbitmap_zero (insns_with_deaths);
EXECUTE_IF_SET_IN_SBITMAP (old_deaths, 0, i,
- { SET_BIT (insns_with_deaths, i);});
+ SET_BIT (insns_with_deaths, i));
sbitmap_free (old_deaths);
}
death_insns_max_uid = get_max_uid ();
@@ -2366,10 +2371,7 @@ assign_stack_slots ()
Remove dead move insns.
This is the same as coalesce and substitute. */
static int
-coalesce_spill_slot (web, ref, place)
- struct web *web;
- struct ref *ref;
- rtx place;
+coalesce_spill_slot (struct web *web, struct ref *ref, rtx place)
{
rtx source;
struct web *dweb;
@@ -2497,7 +2499,7 @@ coalesce_spill_slot (web, ref, place)
    This function also performs a simple elimination of dead insns. */
static void
-assign_stack_slots_1 ()
+assign_stack_slots_1 (void)
{
unsigned int j, i, n, webs_count;
struct ref **refs;
@@ -2549,8 +2551,6 @@ assign_stack_slots_1 ()
bitmap_set_bit (rewrite_stack_slots, web->regno);
}
#endif
- RTX_UNCHANGING_P (place) =
- RTX_UNCHANGING_P (regno_reg_rtx[web->regno]);
set_mem_alias_set (place, new_alias_set ());
}
if (web->pattern)
@@ -2630,8 +2630,8 @@ assign_stack_slots_1 ()
{
rtx pi;
emit_insn_before (insns, insn);
- if (bb->head == insn)
- bb->head = NEXT_INSN (aux_insn);
+ if (BB_HEAD (bb) == insn)
+ BB_HEAD (bb) = NEXT_INSN (aux_insn);
for (pi = PREV_INSN (insn); pi != aux_insn;
pi = PREV_INSN (pi))
{
@@ -2645,8 +2645,8 @@ assign_stack_slots_1 ()
{
rtx ni;
emit_insn_after (insns, insn);
- if (bb->end == insn)
- bb->end = PREV_INSN (aux_insn);
+ if (BB_END (bb) == insn)
+ BB_END (bb) = PREV_INSN (aux_insn);
for (ni = insns; ni != aux_insn; ni = NEXT_INSN (ni))
{
set_block_for_insn (ni, bb);
@@ -2762,8 +2762,6 @@ emit_colors (struct df *df)
rtx place = assign_stack_local (PSEUDO_REGNO_MODE (web->regno),
total_size,
inherent_size == total_size ? 0: -1);
- RTX_UNCHANGING_P (place) =
- RTX_UNCHANGING_P (regno_reg_rtx[web->regno]);
set_mem_alias_set (place, new_alias_set ());
web->reg_rtx = place;
}
@@ -2985,7 +2983,7 @@ remove_suspicious_death_notes (void)
rtx note = *pnote;
if ((REG_NOTE_KIND (note) == REG_DEAD
|| REG_NOTE_KIND (note) == REG_UNUSED)
- && (GET_CODE (XEXP (note, 0)) == REG
+ && (REG_P (XEXP (note, 0))
&& bitmap_bit_p (regnos_coalesced_to_hardregs,
REGNO (XEXP (note, 0)))))
*pnote = XEXP (note, 1);
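The REG_P change above replaces the open-coded GET_CODE test with rtl.h's predicate macro; the two forms are equivalent, since the macro is (modulo formatting) simply:

    /* rtl.h predicate: true iff X is a (reg ...) rtx.  */
    #define REG_P(X) (GET_CODE (X) == REG)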
@@ -3019,10 +3017,9 @@ setup_renumber (int free_it)
}
}
-static struct web * get_aliased_aequivalent PARAMS ((struct web *));
+static struct web * get_aliased_aequivalent (struct web *);
static struct web *
-get_aliased_aequivalent (web)
- struct web *web;
+get_aliased_aequivalent (struct web *web)
{
struct web *supweb = find_web_for_subweb (web);
struct web *aweb = alias (supweb);
@@ -3050,13 +3047,13 @@ get_aliased_aequivalent (web)
functions are not confused by partial sets, which _we_ know are the
initial defines. */
void
-create_flow_barriers ()
+create_flow_barriers (void)
{
basic_block bb;
FOR_EACH_BB (bb)
{
rtx insn;
- for (insn = bb->head; insn != bb->end; insn = NEXT_INSN (insn))
+ for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
unsigned int d;
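create_flow_barriers exists so that later liveness computations treat a web that is only partially set as fully defined at its initial defines; the barrier it emits is an explicit clobber, as the create_flow_barriers_2 hunks below show. A one-line sketch of that barrier, grounded in the code of this file:

    /* Mark WEB's register as completely set just before INSN, so a
       preceding partial set is not mistaken for a use of an undefined
       value.  */
    emit_insn_before (gen_rtx_CLOBBER (VOIDmode, web->reg_rtx), insn);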
@@ -3104,12 +3101,13 @@ create_flow_barriers_2 (void)
{
int j;
rtx insn, prev_insn;
+ bitmap_iterator bi;
sbitmap_zero (live);
- EXECUTE_IF_SET_IN_BITMAP (live_at_end[bb->index], 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (live_at_end[bb->index], 0, j, bi)
{
set_web_live (live, get_aliased_aequivalent (use2web[j]));
- });
- for (insn = bb->end; insn; insn = prev_insn)
+ }
+ for (insn = BB_END (bb); insn; insn = prev_insn)
{
prev_insn = PREV_INSN (insn);
@@ -3136,7 +3134,7 @@ create_flow_barriers_2 (void)
set_web_live (live, web);
}
- EXECUTE_IF_SET_IN_BITMAP (partly_defined, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (partly_defined, 0, j, bi)
{
struct web *web = ID2WEB (j);
if (web->type != PRECOLORED
@@ -3149,10 +3147,10 @@ create_flow_barriers_2 (void)
emit_insn_before (gen_rtx_CLOBBER (VOIDmode,
web->reg_rtx), insn);
}
- });
+ }
}
- if (insn == bb->head)
+ if (insn == BB_HEAD (bb))
break;
}
}
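These hunks also catch up with mainline's basic-block accessors: direct bb->head and bb->end field accesses become BB_HEAD (bb) and BB_END (bb). A minimal sketch of the backward walk the converted loops use, assuming basic-block.h as of this merge point:

    rtx insn, prev;

    /* Walk BB's insns from last to first; PREV is latched up front
       because spill or barrier insns may be emitted around INSN while
       it is being processed.  */
    for (insn = BB_END (bb); insn; insn = prev)
      {
        prev = PREV_INSN (insn);
        if (INSN_P (insn))
          {
            /* per-insn work */
          }
        if (insn == BB_HEAD (bb))
          break;
      }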
@@ -3172,14 +3170,11 @@ struct split_cost
static struct split_cost *split_costs;
static sbitmap *live_at_begin;
-static void set_web_live_s PARAMS ((sbitmap, bitmap, struct web*));
-static void reset_web_live_s PARAMS ((sbitmap, bitmap, struct web*));
+static void set_web_live_s (sbitmap, bitmap, struct web*);
+static void reset_web_live_s (sbitmap, bitmap, struct web*);
static void
-set_web_live_s (live, suplive, web)
- sbitmap live;
- bitmap suplive;
- struct web *web;
+set_web_live_s (sbitmap live, bitmap suplive, struct web *web)
{
set_web_live (live, web);
web = find_web_for_subweb (web);
@@ -3190,10 +3185,7 @@ set_web_live_s (live, suplive, web)
}
static void
-reset_web_live_s (live, suplive, web)
- sbitmap live;
- bitmap suplive;
- struct web *web;
+reset_web_live_s (sbitmap live, bitmap suplive, struct web *web)
{
reset_web_live (live, web);
web = find_web_for_subweb (web);
@@ -3215,17 +3207,18 @@ have_splits_p (void)
return 0;
}
-extern int copy_insn_p PARAMS ((rtx, rtx *, rtx *));
-extern void init_split_costs PARAMS ((void));
-extern int find_splits PARAMS ((struct web *));
-extern void free_split_costs PARAMS ((void));
+extern int copy_insn_p (rtx, rtx *, rtx *);
+extern void init_split_costs (void);
+extern int find_splits (struct web *);
+extern void free_split_costs (void);
void
-init_split_costs ()
+init_split_costs (void)
{
basic_block bb;
unsigned int i;
sbitmap live;
+ bitmap_iterator bi;
bitmap suplive = BITMAP_XMALLOC ();
live = sbitmap_alloc (num_webs);
contained = (bitmap *) xmalloc ((num_webs - num_subwebs) * sizeof (bitmap));
@@ -3248,11 +3241,11 @@ init_split_costs ()
int j;
rtx insn;
sbitmap_zero (live);
- EXECUTE_IF_SET_IN_BITMAP (live_at_end[bb->index], 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (live_at_end[bb->index], 0, j, bi)
{
set_web_live (live, use2web[j]);
- });
- for (insn = bb->end; insn; insn = PREV_INSN (insn))
+ }
+ for (insn = BB_END (bb); insn; insn = PREV_INSN (insn))
{
if (INSN_P (insn))
{
@@ -3265,7 +3258,7 @@ init_split_costs ()
for (n = 0; n < info.num_uses; n++)
set_web_live (live, use2web[DF_REF_ID (info.uses[n])]);
}
- if (insn == bb->head)
+ if (insn == BB_HEAD (bb))
break;
}
sbitmap_copy (live_at_begin[bb->index], live);
@@ -3274,15 +3267,16 @@ init_split_costs ()
{
int j;
edge e;
+ edge_iterator ei;
rtx insn;
sbitmap_zero (live);
- EXECUTE_IF_SET_IN_BITMAP (live_at_end[bb->index], 0, j,
- set_web_live_s (live, suplive, use2web[j]));
- for (e = bb->succ; e; e = e->succ_next)
+ EXECUTE_IF_SET_IN_BITMAP (live_at_end[bb->index], 0, j, bi)
+ set_web_live_s (live, suplive, use2web[j]);
+ FOR_EACH_EDGE (e, ei, bb->succs)
{
if (e->dest == EXIT_BLOCK_PTR)
continue;
- EXECUTE_IF_SET_IN_BITMAP (suplive, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (suplive, 0, j, bi)
{
if (!is_partly_live (live_at_begin[e->dest->index], ID2WEB (j)))
{
@@ -3292,19 +3286,19 @@ init_split_costs ()
else
split_costs[j].loads += (4 * (e->dest->frequency + 1)) / 3;
}
- });
+ }
}
/* We can't split around webs which have a reference in the last
insn of a basic block, if one of the outgoing edges is non
splittable (for now critical, because we don't yet split). */
- for (e = bb->succ; e; e = e->succ_next)
+ FOR_EACH_EDGE (e, ei, bb->succs)
if (e->dest != EXIT_BLOCK_PTR
&& EDGE_CRITICAL_P (e))
{
unsigned int n;
struct ra_insn_info info;
- insn = bb->end;
+ insn = BB_END (bb);
if (!INSN_P (insn))
insn = prev_real_insn (insn);
info = insn_df[INSN_UID (insn)];
@@ -3319,7 +3313,7 @@ init_split_costs ()
break;
}
- for (insn = bb->end; insn; insn = PREV_INSN (insn))
+ for (insn = BB_END (bb); insn; insn = PREV_INSN (insn))
{
if (INSN_P (insn))
{
@@ -3337,10 +3331,10 @@ init_split_costs ()
web = find_web_for_subweb (web);
if (!is_partly_live (live, web))
{
- if (insn != bb->end)
+ if (insn != BB_END (bb))
split_costs[web->id].loads += (4 * (bb->frequency + 1)) / 3;
else
- for (e = bb->succ; e; e = e->succ_next)
+ FOR_EACH_EDGE (e, ei, bb->succs)
split_costs[web->id].loads += (4 * (e->dest->frequency + 1)) / 3;
}
}
@@ -3398,14 +3392,14 @@ init_split_costs ()
struct web *web1 = def2web[DF_REF_ID (info.defs[n])];
web1 = find_web_for_subweb (web1);
if (!web1->pattern)
- EXECUTE_IF_SET_IN_BITMAP (suplive, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (suplive, 0, j, bi)
{
struct web *web2 = ID2WEB (j);
if (web1 != web2
&& hard_regs_intersect_p (&web1->orig_usable_regs,
&web2->orig_usable_regs))
bitmap_set_bit (contained[j], web1->id);
- });
+ }
}
/* All defs' webs are now not live anymore, except for
early clobber ones. */
@@ -3419,14 +3413,14 @@ init_split_costs ()
{
struct web *web1 = use2web[DF_REF_ID (info.uses[n])];
web1 = find_web_for_subweb (web1);
- EXECUTE_IF_SET_IN_BITMAP (suplive, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (suplive, 0, j, bi)
{
struct web *web2 = ID2WEB (j);
if (web1 != web2
&& hard_regs_intersect_p (&web1->orig_usable_regs,
&web2->orig_usable_regs))
bitmap_set_bit (contained[j], web1->id);
- });
+ }
}
/* Now all effects of the insn have been seen, so also
early clobber webs are not live anymore. */
@@ -3445,7 +3439,7 @@ init_split_costs ()
for (n = 0; n < info.num_uses; n++)
set_web_live_s (live, suplive, use2web[DF_REF_ID (info.uses[n])]);
}
- if (insn == bb->head)
+ if (insn == BB_HEAD (bb))
break;
}
}
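The edge loops are likewise converted from the old hand-rolled e->succ_next chains to the vector-based FOR_EACH_EDGE / edge_iterator interface. A minimal sketch of the successor walk used throughout init_split_costs and insert_splits, assuming basic-block.h as of this merge point:

    edge e;
    edge_iterator ei;

    FOR_EACH_EDGE (e, ei, bb->succs)
      {
        if (e->dest == EXIT_BLOCK_PTR)
          continue;
        /* per-edge work, e.g. charging split load costs to E->dest.  */
      }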
@@ -3457,8 +3451,7 @@ init_split_costs ()
}
int
-find_splits (web)
- struct web *web;
+find_splits (struct web *web)
{
struct conflict_link *wl;
unsigned int *min_color;
@@ -3605,15 +3598,11 @@ find_splits (web)
return 0;
}
-static void split_insert_load PARAMS ((struct web *, rtx, int, sbitmap));
-static void split_insert_store PARAMS ((struct web *, rtx, int, sbitmap, sbitmap, bitmap));
+static void split_insert_load (struct web *, rtx, int, sbitmap);
+static void split_insert_store (struct web *, rtx, int, sbitmap, sbitmap, bitmap);
static void
-split_insert_load (web, insn, before, live)
- struct web *web;
- rtx insn;
- int before;
- sbitmap live;
+split_insert_load (struct web *web, rtx insn, int before, sbitmap live)
{
rtx slot, ni;
struct web *aweb;
@@ -3672,27 +3661,23 @@ split_insert_load (web, insn, before, live)
i2 = insn;
}
if (!before && (debug_new_regalloc & DUMP_PROCESS))
- ra_print_rtx_top (rtl_dump_file, insn, 1);
+ ra_print_rtx_top (dump_file, insn, 1);
for (ni = i1; ni != i2; ni = NEXT_INSN (ni))
{
df_insn_modify (df, bb, ni);
bitmap_set_bit (ra_modified_insns, INSN_UID (ni));
bitmap_set_bit (emitted_by_spill, INSN_UID (ni));
if (debug_new_regalloc & DUMP_PROCESS)
- ra_print_rtx_top (rtl_dump_file, ni, 0);
+ ra_print_rtx_top (dump_file, ni, 0);
}
if (before && (debug_new_regalloc & DUMP_PROCESS))
- ra_print_rtx_top (rtl_dump_file, insn, 1);
+ ra_print_rtx_top (dump_file, insn, 1);
}
}
static void
-split_insert_store (web, insn, before, live, need_load, new_deaths)
- struct web *web;
- rtx insn;
- int before;
- sbitmap live, need_load;
- bitmap new_deaths;
+split_insert_store (struct web *web, rtx insn, int before, sbitmap live,
+ sbitmap need_load, bitmap new_deaths)
{
rtx ni;
struct web *aweb;
@@ -3749,26 +3734,26 @@ split_insert_store (web, insn, before, live, need_load, new_deaths)
i2 = insn;
}
if (!before && (debug_new_regalloc & DUMP_PROCESS))
- ra_print_rtx_top (rtl_dump_file, insn, 1);
+ ra_print_rtx_top (dump_file, insn, 1);
for (ni = i1; ni != i2; ni = NEXT_INSN (ni))
{
df_insn_modify (df, bb, ni);
bitmap_set_bit (ra_modified_insns, INSN_UID (ni));
bitmap_set_bit (emitted_by_spill, INSN_UID (ni));
if (debug_new_regalloc & DUMP_PROCESS)
- ra_print_rtx_top (rtl_dump_file, ni, 0);
+ ra_print_rtx_top (dump_file, ni, 0);
}
if (before && (debug_new_regalloc & DUMP_PROCESS))
- ra_print_rtx_top (rtl_dump_file, insn, 1);
+ ra_print_rtx_top (dump_file, insn, 1);
}
}
void
-insert_splits (new_deaths)
- bitmap new_deaths;
+insert_splits (bitmap new_deaths)
{
basic_block bb;
sbitmap live, need_load;
+ bitmap_iterator bi;
bitmap suplive, split_those_1, lazy_store;
if (!have_splits_p ())
return;
@@ -3781,18 +3766,19 @@ insert_splits (new_deaths)
{
int j;
edge e;
+ edge_iterator ei;
rtx insn;
sbitmap_zero (live);
bitmap_zero (suplive);
bitmap_zero (lazy_store);
- EXECUTE_IF_SET_IN_BITMAP (live_at_end[bb->index], 0, j,
- set_web_live_s (live, suplive, use2web[j]));
+ EXECUTE_IF_SET_IN_BITMAP (live_at_end[bb->index], 0, j, bi)
+ set_web_live_s (live, suplive, use2web[j]);
sbitmap_copy (need_load, live);
- for (e = bb->succ; e; e = e->succ_next)
+ FOR_EACH_EDGE (e, ei, bb->succs)
{
if (e->dest == EXIT_BLOCK_PTR)
continue;
- EXECUTE_IF_SET_IN_BITMAP (suplive, 0, j,
+ EXECUTE_IF_SET_IN_BITMAP (suplive, 0, j, bi)
{
if (!is_partly_live (live_at_begin[e->dest->index], ID2WEB (j)))
{
@@ -3803,15 +3789,16 @@ insert_splits (new_deaths)
else
{
int w;
- EXECUTE_IF_SET_IN_BITMAP (split_around[j], 0, w,
+ bitmap_iterator bw;
+ EXECUTE_IF_SET_IN_BITMAP (split_around[j], 0, w, bw)
split_insert_load (ID2WEB (w),
- next_real_insn (e->dest->head), 1,
- live_at_begin[e->dest->index]));
+ next_real_insn (BB_HEAD (e->dest)),
+ 1, live_at_begin[e->dest->index]);
}
}
- });
+ }
}
- for (insn = bb->end; insn; insn = PREV_INSN (insn))
+ for (insn = BB_END (bb); insn; insn = PREV_INSN (insn))
{
if (INSN_P (insn)
&& INSN_UID (insn) < insn_df_max_uid)
@@ -3909,21 +3896,25 @@ insert_splits (new_deaths)
}
if (split_those)
{
- if (insn != bb->end || !JUMP_P (insn))
- EXECUTE_IF_SET_IN_BITMAP (split_those, 0, w,
- split_insert_load (ID2WEB (w), insn, 0, need_load);
- reset_web_live (need_load, ID2WEB (w));
- bitmap_clear_bit (lazy_store, w));
+ if (insn != BB_END (bb) || !JUMP_P (insn))
+ EXECUTE_IF_SET_IN_BITMAP (split_those, 0, w, bi)
+ {
+ split_insert_load (ID2WEB (w), insn, 0, need_load);
+ reset_web_live (need_load, ID2WEB (w));
+ bitmap_clear_bit (lazy_store, w);
+ }
else
{
- for (e = bb->succ; e; e = e->succ_next)
- EXECUTE_IF_SET_IN_BITMAP (split_those, 0, w,
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ EXECUTE_IF_SET_IN_BITMAP (split_those, 0, w, bi)
split_insert_load (ID2WEB (w),
- next_real_insn (e->dest->head),
- 1, need_load));
- EXECUTE_IF_SET_IN_BITMAP (split_those, 0, w,
- reset_web_live (need_load, ID2WEB (w));
- bitmap_clear_bit (lazy_store, w));
+ next_real_insn (BB_HEAD (e->dest)),
+ 1, need_load);
+ EXECUTE_IF_SET_IN_BITMAP (split_those, 0, w, bi)
+ {
+ reset_web_live (need_load, ID2WEB (w));
+ bitmap_clear_bit (lazy_store, w);
+ }
}
}
@@ -3957,6 +3948,7 @@ insert_splits (new_deaths)
if (!complete_define)
continue;
EXECUTE_IF_SET_IN_BITMAP (split_around[supweb->id], 0, w,
+ bi)
{
struct web *web = ID2WEB (w);
if (!web->pattern)
@@ -3966,7 +3958,7 @@ insert_splits (new_deaths)
/*split_insert_store (web, insn, 1, live, need_load,
new_deaths); */
}
- });
+ };
}
/*for (n = 0; n < info.num_uses; n++)
{
@@ -3987,11 +3979,11 @@ insert_splits (new_deaths)
rtx insn2;
for (insn2 = insn;
insn2 != web->last_use_insn
- && insn2 != NEXT_INSN (bb->end);
+ && insn2 != NEXT_INSN (BB_END (bb));
insn2 = NEXT_INSN (insn2))
if (1 || bitmap_bit_p (new_deaths, INSN_UID (insn2)))
break;
- if (insn2 == NEXT_INSN (bb->end))
+ if (insn2 == NEXT_INSN (BB_END (bb)))
abort ();
if (insn2 == web->last_use_insn)
split_insert_store (web, insn2, 1, live, need_load, new_deaths);
@@ -4012,28 +4004,28 @@ insert_splits (new_deaths)
set_web_live (need_load, use2web[DF_REF_ID (info.uses[n])]);
}
}
- if (insn == bb->head)
+ if (insn == BB_HEAD (bb))
break;
}
- insn = INSN_P (bb->head) ? bb->head : next_real_insn (bb->head);
- EXECUTE_IF_SET_IN_BITMAP (lazy_store, 0, j,
+ insn = INSN_P (BB_HEAD (bb)) ? BB_HEAD (bb) : next_real_insn (BB_HEAD (bb));
+ EXECUTE_IF_SET_IN_BITMAP (lazy_store, 0, j, bi)
{
struct web *web = ID2WEB (j);
rtx insn2;
for (insn2 = insn;
insn2 != web->last_use_insn
- && insn2 != NEXT_INSN (bb->end);
+ && insn2 != NEXT_INSN (BB_END (bb));
insn2 = NEXT_INSN (insn2))
if (1 || bitmap_bit_p (new_deaths, INSN_UID (insn2)))
break;
- if (insn2 == NEXT_INSN (bb->end))
+ if (insn2 == NEXT_INSN (BB_END (bb)))
abort ();
if (insn2 == web->last_use_insn)
split_insert_store (web, insn2, 1, live, need_load, new_deaths);
else
split_insert_store (web, insn2, 0, live, need_load, new_deaths);
/*split_insert_store (ID2WEB (j), insn, 1, live, need_load, new_deaths)*/
- });
+ };
}
sbitmap_free (need_load);
sbitmap_free (live);
@@ -4043,7 +4035,7 @@ insert_splits (new_deaths)
}
void
-free_split_costs ()
+free_split_costs (void)
{
unsigned int i;
if (!live_at_begin)
@@ -4066,9 +4058,7 @@ free_split_costs ()
/* The WEB can't have a single color. The REF is a constraining ref.
The REF will be spilled out from the WEB. */
void
-web_class_spill_ref (web, ref)
- struct web *web;
- struct ref *ref;
+web_class_spill_ref (struct web *web, struct ref *ref)
{
rtx insns;
rtx insn = DF_REF_INSN (ref);
@@ -4144,8 +4134,8 @@ web_class_spill_ref (web, ref)
rtx pi;
rtx aux_insn = PREV_INSN (insn);
emit_insn_before (insns, insn);
- if (bb->head == insn)
- bb->head = NEXT_INSN (aux_insn);
+ if (BB_HEAD (bb) == insn)
+ BB_HEAD (bb) = NEXT_INSN (aux_insn);
for (pi = PREV_INSN (insn); pi != aux_insn;
pi = PREV_INSN (pi))
{
@@ -4165,8 +4155,8 @@ web_class_spill_ref (web, ref)
rtx ni;
rtx aux_insn = NEXT_INSN (insn);
emit_insn_after (insns, insn);
- if (bb->end == insn)
- bb->end = PREV_INSN (aux_insn);
+ if (BB_END (bb) == insn)
+ BB_END (bb) = PREV_INSN (aux_insn);
for (ni = insns; ni != aux_insn; ni = NEXT_INSN (ni))
{
set_block_for_insn (ni, bb);
diff --git a/gcc/ra.c b/gcc/ra.c
index 0846a9ea74b..ea69e901b90 100644
--- a/gcc/ra.c
+++ b/gcc/ra.c
@@ -85,6 +85,7 @@
* use the constraints from asms
*/
+static int first_hard_reg (HARD_REG_SET);
static struct obstack ra_obstack;
static void create_insn_info (struct df *);
static void free_insn_info (void);
@@ -151,6 +152,7 @@ unsigned int remember_conflicts;
HARD_REG_SET never_use_colors;
HARD_REG_SET usable_regs[N_REG_CLASSES];
unsigned int num_free_regs[N_REG_CLASSES];
+int single_reg_in_regclass[N_REG_CLASSES];
HARD_REG_SET hardregs_for_mode[NUM_MACHINE_MODES];
HARD_REG_SET invalid_mode_change_regs;
unsigned char byte2bitcount[256];
@@ -231,6 +233,21 @@ hard_regs_count (HARD_REG_SET rs)
return count;
}
+/* Returns the first hardreg in HARD_REG_SET RS. Assumes there is at
+ least one reg in the set. */
+
+static int
+first_hard_reg (HARD_REG_SET rs)
+{
+ int c;
+
+ for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
+ if (TEST_HARD_REG_BIT (rs, c))
+ break;
+ gcc_assert (c < FIRST_PSEUDO_REGISTER);
+ return c;
+}
+
/* Basically like emit_move_insn (i.e. validifies constants and such),
but also handle MODE_CC moves (but then the operands must already
be basically valid). */
@@ -295,8 +312,7 @@ create_insn_info (struct df *df)
act_refs += n;
insn_df[uid].num_uses = n;
}
- if (refs_for_insn_df + (df->def_id + df->use_id) < act_refs)
- abort ();
+ gcc_assert (refs_for_insn_df + (df->def_id + df->use_id) >= act_refs);
}
/* Free the insn_df structures. */
@@ -319,8 +335,7 @@ struct web *
find_subweb (struct web *web, rtx reg)
{
struct web *w;
- if (GET_CODE (reg) != SUBREG)
- abort ();
+ gcc_assert (GET_CODE (reg) == SUBREG);
for (w = web->subreg_next; w; w = w->subreg_next)
if (GET_MODE (w->orig_x) == GET_MODE (reg)
&& SUBREG_BYTE (w->orig_x) == SUBREG_BYTE (reg))
@@ -377,9 +392,7 @@ lose:
Return nonzero if they do. */
int
-hard_regs_combinable_p (w1, w2)
- struct web *w1;
- struct web *w2;
+hard_regs_combinable_p (struct web *w1, struct web *w2)
{
HARD_REG_SET c;
COPY_HARD_REG_SET (c, w1->usable_regs);
@@ -390,8 +403,7 @@ hard_regs_combinable_p (w1, w2)
 /* Returns 1 if hard register sets A and B are equal. */
int
-hard_regs_same_p (a, b)
- HARD_REG_SET a, b;
+hard_regs_same_p (HARD_REG_SET a, HARD_REG_SET b)
{
GO_IF_HARD_REG_EQUAL (a, b, equal);
return 0;
@@ -543,9 +555,7 @@ init_ra (void)
#endif
int need_fp
= (! flag_omit_frame_pointer
-#ifdef EXIT_IGNORE_STACK
|| (current_function_calls_alloca && EXIT_IGNORE_STACK)
-#endif
|| FRAME_POINTER_REQUIRED);
#ifdef ORDER_REGS_FOR_LOCAL_ALLOC
@@ -567,26 +577,26 @@ init_ra (void)
{
if (! CAN_ELIMINATE (eliminables[j].from, eliminables[j].to)
|| (eliminables[j].to == STACK_POINTER_REGNUM && need_fp))
- for (i = HARD_REGNO_NREGS (eliminables[j].from, Pmode); i--;)
+ for (i = hard_regno_nregs[eliminables[j].from][Pmode]; i--;)
SET_HARD_REG_BIT (never_use_colors, eliminables[j].from + i);
}
#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
if (need_fp)
- for (i = HARD_REGNO_NREGS (HARD_FRAME_POINTER_REGNUM, Pmode); i--;)
+ for (i = hard_regno_nregs[HARD_FRAME_POINTER_REGNUM][Pmode]; i--;)
SET_HARD_REG_BIT (never_use_colors, HARD_FRAME_POINTER_REGNUM + i);
#endif
#else
if (need_fp)
- for (i = HARD_REGNO_NREGS (FRAME_POINTER_REGNUM, Pmode); i--;)
+ for (i = hard_regno_nregs[FRAME_POINTER_REGNUM][Pmode]; i--;)
SET_HARD_REG_BIT (never_use_colors, FRAME_POINTER_REGNUM + i);
#endif
/* Stack and argument pointer are also rather useless to us. */
- for (i = HARD_REGNO_NREGS (STACK_POINTER_REGNUM, Pmode); i--;)
+ for (i = hard_regno_nregs[STACK_POINTER_REGNUM][Pmode]; i--;)
SET_HARD_REG_BIT (never_use_colors, STACK_POINTER_REGNUM + i);
- for (i = HARD_REGNO_NREGS (ARG_POINTER_REGNUM, Pmode); i--;)
+ for (i = hard_regno_nregs[ARG_POINTER_REGNUM][Pmode]; i--;)
SET_HARD_REG_BIT (never_use_colors, ARG_POINTER_REGNUM + i);
for (i = 0; i < 256; i++)
@@ -610,6 +620,10 @@ init_ra (void)
size = hard_regs_count (rs);
num_free_regs[i] = size;
COPY_HARD_REG_SET (usable_regs[i], rs);
+ if (size == 1)
+ single_reg_in_regclass[i] = first_hard_reg (rs);
+ else
+ single_reg_in_regclass[i] = -1;
}
/* Setup hardregs_for_mode[].
@@ -623,7 +637,7 @@ init_ra (void)
for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
if (HARD_REGNO_MODE_OK (reg, i)
/* Ignore VOIDmode and similar things. */
- && (size = HARD_REGNO_NREGS (reg, i)) != 0
+ && (size = hard_regno_nregs[reg][i]) != 0
&& (reg + size) <= FIRST_PSEUDO_REGISTER)
{
while (size--)
@@ -652,8 +666,8 @@ init_ra (void)
an_unusable_color++)
if (TEST_HARD_REG_BIT (never_use_colors, an_unusable_color))
break;
- if (an_unusable_color == FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (an_unusable_color != FIRST_PSEUDO_REGISTER);
+
init_long_blocks_for_classes ();
compute_bb_for_insn ();
ra_reg_renumber = NULL;
@@ -668,7 +682,7 @@ init_ra (void)
gcc_obstack_init (&ra_obstack);
}
-/* Check the consistency of DF. This aborts if it violates some
+/* Check the consistency of DF. This asserts if it violates some
    invariants we expect. */
static void
@@ -699,19 +713,21 @@ check_df (struct df *df)
{
bitmap_clear (b);
for (link = DF_INSN_DEFS (df, insn); link; link = link->next)
- if (!link->ref || bitmap_bit_p (empty_defs, DF_REF_ID (link->ref))
- || bitmap_bit_p (b, DF_REF_ID (link->ref)))
- abort ();
- else
+ {
+ gcc_assert (link->ref);
+ gcc_assert (!bitmap_bit_p (empty_defs, DF_REF_ID (link->ref)));
+ gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref)));
bitmap_set_bit (b, DF_REF_ID (link->ref));
+ }
bitmap_clear (b);
for (link = DF_INSN_USES (df, insn); link; link = link->next)
- if (!link->ref || bitmap_bit_p (empty_uses, DF_REF_ID (link->ref))
- || bitmap_bit_p (b, DF_REF_ID (link->ref)))
- abort ();
- else
+ {
+ gcc_assert (link->ref);
+ gcc_assert (!bitmap_bit_p (empty_uses, DF_REF_ID (link->ref)));
+ gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref)));
bitmap_set_bit (b, DF_REF_ID (link->ref));
+ }
}
/* Now the same for the chains per register number. */
@@ -719,19 +735,21 @@ check_df (struct df *df)
{
bitmap_clear (b);
for (link = df->regs[regno].defs; link; link = link->next)
- if (!link->ref || bitmap_bit_p (empty_defs, DF_REF_ID (link->ref))
- || bitmap_bit_p (b, DF_REF_ID (link->ref)))
- abort ();
- else
+ {
+ gcc_assert (link->ref);
+ gcc_assert (!bitmap_bit_p (empty_defs, DF_REF_ID (link->ref)));
+ gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref)));
bitmap_set_bit (b, DF_REF_ID (link->ref));
+ }
bitmap_clear (b);
for (link = df->regs[regno].uses; link; link = link->next)
- if (!link->ref || bitmap_bit_p (empty_uses, DF_REF_ID (link->ref))
- || bitmap_bit_p (b, DF_REF_ID (link->ref)))
- abort ();
- else
+ {
+ gcc_assert (link->ref);
+ gcc_assert (!bitmap_bit_p (empty_uses, DF_REF_ID (link->ref)));
+ gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref)));
bitmap_set_bit (b, DF_REF_ID (link->ref));
+ }
}
BITMAP_XFREE (empty_uses);
@@ -753,19 +771,19 @@ validify_one_insn (rtx insn)
for (i = 0; i < n_ops; i++)
if (strchr (recog_data.constraints[i], '%') != NULL)
commutative = i;
- ra_print_rtx_top (rtl_dump_file, insn, 0);
+ ra_print_rtx_top (dump_file, insn, 0);
if (recog_data.n_alternatives == 0 || n_ops == 0)
{
if (!valid)
abort ();
- fprintf (rtl_dump_file,
+ fprintf (dump_file,
" --> has no constrained operands, i.e. is valid\n");
}
else if (valid)
{
if (alt < 0)
abort ();
- fprintf (rtl_dump_file, " --> matched alternative %d\n", alt);
+ fprintf (dump_file, " --> matched alternative %d\n", alt);
for (i = 0; i < n_ops; i++)
{
char *constraint = xstrdup (recog_op_alt[i][alt].constraint);
@@ -774,22 +792,22 @@ validify_one_insn (rtx insn)
if (comma)
*comma = 0;
len = strlen (constraint);
- fprintf (rtl_dump_file, "\top%d: %s\t", i, constraint);
+ fprintf (dump_file, "\top%d: %s\t", i, constraint);
if (len <= 2)
- fprintf (rtl_dump_file, "\t");
+ fprintf (dump_file, "\t");
if (comma)
*comma = ',';
- ra_print_rtx (rtl_dump_file, recog_data.operand[i], 0);
- fprintf (rtl_dump_file, "\n");
+ ra_print_rtx (dump_file, recog_data.operand[i], 0);
+ fprintf (dump_file, "\n");
free (constraint);
}
}
else
{
- fprintf (rtl_dump_file, " --> invalid insn");
+ fprintf (dump_file, " --> invalid insn");
if (commutative >= 0)
- fprintf (rtl_dump_file, ", but commutative in op %d", commutative);
- fprintf (rtl_dump_file, "\n");
+ fprintf (dump_file, ", but commutative in op %d", commutative);
+ fprintf (dump_file, "\n");
}
}
@@ -798,7 +816,7 @@ make_insns_structurally_valid (void)
{
rtx insn;
int old_rip = reload_in_progress;
- if (!rtl_dump_file || (debug_new_regalloc & DUMP_VALIDIFY) == 0)
+ if (!dump_file || (debug_new_regalloc & DUMP_VALIDIFY) == 0)
return;
reload_in_progress = 0;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
@@ -854,7 +872,7 @@ detect_possible_mem_refs (struct df *df)
the allocator. */
static void
-cleanup_insn_stream ()
+cleanup_insn_stream (void)
{
rtx insn;
for (insn = get_insns(); insn; insn = NEXT_INSN (insn))
@@ -883,10 +901,10 @@ split_critical_edges (void)
basic_block bb;
FOR_EACH_BB (bb)
{
- edge e, e_succ;
- for (e = bb->succ; e; e = e_succ)
+ edge_iterator ei;
+ edge e;
+ FOR_EACH_EDGE (e, ei, bb->succs)
{
- e_succ = e->succ_next;
if (EDGE_CRITICAL_P (e)
&& (e->flags & EDGE_ABNORMAL) == 0)
split_edge (e);
@@ -903,7 +921,7 @@ void
reg_alloc (void)
{
int changed;
- FILE *ra_dump_file = rtl_dump_file;
+ FILE *ra_dump_file = dump_file;
rtx last;
bitmap use_insns = BITMAP_XMALLOC ();
@@ -928,10 +946,12 @@ reg_alloc (void)
if (last)
{
edge e;
- for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
{
basic_block bb = e->src;
- last = bb->end;
+ last = BB_END (bb);
if (!INSN_P (last) || GET_CODE (PATTERN (last)) != USE)
{
rtx insn, insns;
@@ -961,7 +981,7 @@ reg_alloc (void)
break;
case 6: debug_new_regalloc = DUMP_VALIDIFY; break;
}
- if (!rtl_dump_file)
+ if (!dump_file)
debug_new_regalloc = 0;
  /* First clean up the insn stream of confusing clobber and self-copy
@@ -973,10 +993,10 @@ reg_alloc (void)
for each pseudo. Deactivate emitting of debug info, if it's not
explicitly requested. */
if ((debug_new_regalloc & DUMP_REGCLASS) == 0)
- rtl_dump_file = NULL;
+ dump_file = NULL;
if (!flag_ra_pre_reload)
- regclass (get_insns (), max_reg_num (), rtl_dump_file);
- rtl_dump_file = ra_dump_file;
+ regclass (get_insns (), max_reg_num (), dump_file);
+ dump_file = ra_dump_file;
/* Initialize the different global arrays and regsets. */
init_ra ();
@@ -1038,11 +1058,11 @@ reg_alloc (void)
if (flag_ra_pre_reload)
{
pre_reload (ra_info, ra_modified_insns);
- if (rtl_dump_file && ra_pass == 1 && (debug_new_regalloc & DUMP_RTL))
+ if (dump_file && ra_pass == 1 && (debug_new_regalloc & DUMP_RTL))
{
ra_debug_msg (DUMP_NEARLY_EVER, "Original function:\n");
- ra_print_rtl_with_bb (rtl_dump_file, get_insns ());
- fflush (rtl_dump_file);
+ ra_print_rtl_with_bb (dump_file, get_insns ());
+ fflush (dump_file);
}
}
@@ -1075,7 +1095,7 @@ reg_alloc (void)
/* First collect all the register refs and put them into
chains per insn, and per regno. In later passes only update
that info from the new and modified insns. */
- df_analyse (df, (ra_pass == 1) ? 0 : (bitmap) -1,
+ df_analyze (df, (ra_pass == 1) ? 0 : (bitmap) -1,
DF_HARD_REGS | DF_RD_CHAIN | DF_RU_CHAIN | DF_FOR_REGALLOC);
if (flag_ra_pre_reload)
@@ -1084,10 +1104,10 @@ reg_alloc (void)
if ((debug_new_regalloc & DUMP_DF) != 0)
{
rtx insn;
- df_dump (df, DF_HARD_REGS, rtl_dump_file);
+ df_dump (df, DF_HARD_REGS, dump_file);
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
- df_insn_debug_regno (df, insn, rtl_dump_file);
+ df_insn_debug_regno (df, insn, dump_file);
}
check_df (df);
@@ -1095,8 +1115,8 @@ reg_alloc (void)
first pass), reallocate only additional memory. */
alloc_mem (df);
/*ra_debug_msg (DUMP_EVER, "before one_pass()\n");
- if (rtl_dump_file)
- print_rtl_with_bb (rtl_dump_file, get_insns ());
+ if (dump_file)
+ print_rtl_with_bb (dump_file, get_insns ());
verify_flow_info ();*/
detect_possible_mem_refs (df);
@@ -1104,8 +1124,8 @@ reg_alloc (void)
spill insns. This also might delete certain move insns. */
changed = one_pass (df, ra_pass > 1);
/*ra_debug_msg (DUMP_EVER, "after one_pass()\n");
- if (rtl_dump_file)
- print_rtl_with_bb (rtl_dump_file, get_insns ());
+ if (dump_file)
+ print_rtl_with_bb (dump_file, get_insns ());
verify_flow_info ();*/
if (flag_ra_pre_reload)
@@ -1140,7 +1160,7 @@ reg_alloc (void)
therefore repeat some things, including some initialization
of global data structures. */
if ((debug_new_regalloc & DUMP_REGCLASS) == 0)
- rtl_dump_file = NULL;
+ dump_file = NULL;
/* We have new pseudos (the stackwebs). */
allocate_reg_info (max_reg_num (), FALSE, FALSE);
/* And new insns. */
@@ -1157,8 +1177,8 @@ reg_alloc (void)
max_regno = max_reg_num ();
   /* And they need useful classes too. */
if (!flag_ra_pre_reload)
- regclass (get_insns (), max_reg_num (), rtl_dump_file);
- rtl_dump_file = ra_dump_file;
+ regclass (get_insns (), max_reg_num (), dump_file);
+ dump_file = ra_dump_file;
/* Remember the number of defs and uses, so we can distinguish
new from old refs in the next pass. */
last_def_id = df->def_id;
@@ -1169,8 +1189,8 @@ reg_alloc (void)
dump_ra (df);
if (changed && (debug_new_regalloc & DUMP_RTL) != 0)
{
- ra_print_rtl_with_bb (rtl_dump_file, get_insns ());
- fflush (rtl_dump_file);
+ ra_print_rtl_with_bb (dump_file, get_insns ());
+ fflush (dump_file);
}
/* Reset the web lists. */
@@ -1194,27 +1214,28 @@ reg_alloc (void)
ra_debug_msg (DUMP_COSTS, "ticks for build-phase: %ld\n", ticks_build);
ra_debug_msg (DUMP_COSTS, "ticks for rebuild-phase: %ld\n", ticks_rebuild);
if ((debug_new_regalloc & (DUMP_FINAL_RTL | DUMP_RTL)) != 0)
- ra_print_rtl_with_bb (rtl_dump_file, get_insns ());
+ ra_print_rtl_with_bb (dump_file, get_insns ());
/* We might have new pseudos, so allocate the info arrays for them. */
if ((debug_new_regalloc & DUMP_SM) == 0)
- rtl_dump_file = NULL;
+ dump_file = NULL;
no_new_pseudos = 0;
allocate_reg_info (max_reg_num (), FALSE, FALSE);
while_newra = 1;
no_new_pseudos = 1;
newra_in_progress = 0;
- rtl_dump_file = ra_dump_file;
+ dump_file = ra_dump_file;
{
+ edge_iterator ei;
edge e;
- for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
{
basic_block bb = e->src;
- last = bb->end;
- for (last = bb->end; last; last = PREV_INSN (last))
+ last = BB_END (bb);
+ for (last = BB_END (bb); last; last = PREV_INSN (last))
{
- if (last == bb->head)
+ if (last == BB_HEAD (bb))
break;
if (bitmap_bit_p (use_insns, INSN_UID (last)))
delete_insn (last);
@@ -1236,14 +1257,14 @@ reg_alloc (void)
/* Cleanup the flow graph. */
if ((debug_new_regalloc & DUMP_LAST_FLOW) == 0)
- rtl_dump_file = NULL;
- life_analysis (get_insns (), rtl_dump_file,
+ dump_file = NULL;
+ life_analysis (dump_file,
PROP_DEATH_NOTES | PROP_LOG_LINKS | PROP_REG_INFO);
cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_UPDATE_LIFE);
recompute_reg_usage (get_insns (), TRUE);
- if (rtl_dump_file)
- dump_flow_info (rtl_dump_file);
- rtl_dump_file = ra_dump_file;
+ if (dump_file)
+ dump_flow_info (dump_file);
+ dump_file = ra_dump_file;
/* update_equiv_regs() can't be called after register allocation.
It might delete some pseudos, and insert other insns setting
@@ -1313,7 +1334,7 @@ reg_alloc (void)
= reg_renumber[i] >= 0 ? reg_renumber[i] : i;
/*mark_regs_live_at_end (EXIT_BLOCK_PTR->global_live_at_start);
update_life_info (NULL, UPDATE_LIFE_GLOBAL, PROP_DEATH_NOTES);*/
- life_analysis (get_insns (), rtl_dump_file, PROP_DEATH_NOTES);
+ life_analysis (get_insns (), dump_file, PROP_DEATH_NOTES);
for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
if (regno_reg_rtx[i] && GET_CODE (regno_reg_rtx[i]) == REG)
REGNO (regno_reg_rtx[i]) = i;
@@ -1322,16 +1343,17 @@ reg_alloc (void)
#endif
if ((debug_new_regalloc & DUMP_LAST_RTL) != 0)
- ra_print_rtl_with_bb (rtl_dump_file, get_insns ());
- dump_static_insn_cost (rtl_dump_file,
+ ra_print_rtl_with_bb (dump_file, get_insns ());
+ dump_static_insn_cost (dump_file,
"after allocation/spilling, before reload", NULL);
/* Allocate the reg_equiv_memory_loc array for reload. */
- reg_equiv_memory_loc = xcalloc (max_regno, sizeof (rtx));
+ VARRAY_GROW (reg_equiv_memory_loc_varray, max_regno);
+ reg_equiv_memory_loc = &VARRAY_RTX (reg_equiv_memory_loc_varray, 0);
/* And possibly initialize it. */
allocate_initial_values (reg_equiv_memory_loc);
/* And one last regclass pass just before reload. */
- regclass (get_insns (), max_reg_num (), rtl_dump_file);
+ regclass (get_insns (), max_reg_num (), dump_file);
BITMAP_XFREE (emitted_by_spill);
BITMAP_XFREE (spill_slot_regs);
#ifdef SPILLING_STATISTICS
diff --git a/gcc/ra.h b/gcc/ra.h
index 91f7af6aa71..dabdef4397a 100644
--- a/gcc/ra.h
+++ b/gcc/ra.h
@@ -18,6 +18,14 @@
with GCC; see the file COPYING. If not, write to the Free Software
Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+#ifndef GCC_RA_H
+#define GCC_RA_H
+
+#include "bitmap.h"
+#include "sbitmap.h"
+#include "hard-reg-set.h"
+#include "insn-modes.h"
+
 /* Doubly-linked list to implement the per-type lists of webs
and moves. */
struct dlist
@@ -35,7 +43,7 @@ struct dlist
#define DLIST_MOVE(l) ((l)->value.move)
/* Classification of a given node (i.e. what state it's in). */
-enum node_type
+enum ra_node_type
{
INITIAL = 0, FREE,
PRECOLORED,
@@ -229,7 +237,7 @@ struct web
unsigned int have_orig_conflicts:1;
/* Current state of the node. */
- ENUM_BITFIELD(node_type) type:5;
+ ENUM_BITFIELD(ra_node_type) type:5;
/* A regclass, combined from preferred and alternate class, or calculated
from usable_regs. Used only for debugging, and to determine
@@ -520,6 +528,9 @@ extern HARD_REG_SET never_use_colors;
extern HARD_REG_SET usable_regs[N_REG_CLASSES];
/* For each class C the count of hardregs in usable_regs[C]. */
extern unsigned int num_free_regs[N_REG_CLASSES];
+/* For each class C which has num_free_regs[C]==1, the color of the
+ single register in that class, -1 otherwise. */
+extern int single_reg_in_regclass[N_REG_CLASSES];
/* For each mode M the hardregs, which are MODE_OK for M, and have
enough space behind them to hold an M value. Additionally
if reg R is OK for mode M, but it needs two hardregs, then R+1 will
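single_reg_in_regclass caches, for every register class, the one usable hard register when the class effectively offers only a single register, and -1 otherwise. A sketch of the invariant init_ra establishes (see the ra.c hunk above); CL is a hypothetical class variable, and first_hard_reg is static to ra.c, so this check would only compile there:

    /* Illustration only: after init_ra, single-register classes cache
       the sole hard reg of their usable set, all others cache -1.  */
    if (num_free_regs[(int) cl] == 1)
      gcc_assert (single_reg_in_regclass[(int) cl]
		  == first_hard_reg (usable_regs[(int) cl]));
    else
      gcc_assert (single_reg_in_regclass[(int) cl] == -1);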
@@ -603,8 +614,8 @@ extern int flag_ra_pre_reload;
register allocator. */
extern int flag_ra_spanned_deaths_from_scratch;
-extern inline void * ra_alloc (size_t);
-extern inline void * ra_calloc (size_t);
+extern void * ra_alloc (size_t);
+extern void * ra_calloc (size_t);
extern int hard_regs_count (HARD_REG_SET);
extern rtx ra_emit_move_insn (rtx, rtx);
extern void ra_debug_msg (unsigned int, const char *, ...) ATTRIBUTE_PRINTF_2;
@@ -655,7 +666,7 @@ extern struct dlist * pop_list (struct dlist **);
extern void record_conflict (struct web *, struct web *);
extern int memref_is_stack_slot (rtx);
extern void build_i_graph (struct df *);
-extern void put_web (struct web *, enum node_type);
+extern void put_web (struct web *, enum ra_node_type);
extern void remove_web_from_list (struct web *);
extern void reset_lists (void);
extern struct web * alias (struct web *);
@@ -695,3 +706,5 @@ extern bitmap rewrite_stack_slots;
/* Number of generated stack slots for spilled webs. */
extern int stack_spill_slots_num;
#endif
+
+#endif /* GCC_RA_H */